mirror of https://github.com/Qortal/Brooklyn.git
synced 2025-01-30 14:52:17 +00:00
phase 7
This commit is contained in:
parent d0034bac99
commit bdb2b1eed6
@@ -50,7 +50,7 @@ struct arp_pkt {
#pragma pack()
/* Forward declaration */
static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
static void alb_send_learning_packets(struct slave *slave, const u8 mac_addr[],
bool strict_match);
static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
static void rlb_src_unlink(struct bonding *bond, u32 index);
@@ -353,7 +353,8 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond)
*
* Caller must hold RTNL
*/
static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
static void rlb_teach_disabled_mac_on_primary(struct bonding *bond,
const u8 addr[])
{
struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
@@ -904,7 +905,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
/*********************** tlb/rlb shared functions *********************/
static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
static void alb_send_lp_vid(struct slave *slave, const u8 mac_addr[],
__be16 vlan_proto, u16 vid)
{
struct learning_pkt pkt;
@@ -940,7 +941,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
struct alb_walk_data {
struct bonding *bond;
struct slave *slave;
u8 *mac_addr;
const u8 *mac_addr;
bool strict_match;
};
@@ -949,9 +950,9 @@ static int alb_upper_dev_walk(struct net_device *upper,
{
struct alb_walk_data *data = (struct alb_walk_data *)priv->data;
bool strict_match = data->strict_match;
const u8 *mac_addr = data->mac_addr;
struct bonding *bond = data->bond;
struct slave *slave = data->slave;
u8 *mac_addr = data->mac_addr;
struct bond_vlan_tag *tags;
if (is_vlan_dev(upper) &&
@@ -982,7 +983,7 @@ static int alb_upper_dev_walk(struct net_device *upper,
return 0;
}
static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
static void alb_send_learning_packets(struct slave *slave, const u8 mac_addr[],
bool strict_match)
{
struct bonding *bond = bond_get_bond_by_slave(slave);
@@ -1006,14 +1007,14 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
rcu_read_unlock();
}
static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[],
static int alb_set_slave_mac_addr(struct slave *slave, const u8 addr[],
unsigned int len)
{
struct net_device *dev = slave->dev;
struct sockaddr_storage ss;
if (BOND_MODE(slave->bond) == BOND_MODE_TLB) {
memcpy(dev->dev_addr, addr, len);
__dev_addr_set(dev, addr, len);
return 0;
}
@@ -1242,8 +1243,7 @@ static int alb_set_mac_address(struct bonding *bond, void *addr)
res = dev_set_mac_address(slave->dev, addr, NULL);
/* restore net_device's hw address */
bond_hw_addr_copy(slave->dev->dev_addr, tmp_addr,
slave->dev->addr_len);
dev_addr_set(slave->dev, tmp_addr);
if (res)
goto unwind;
@@ -1263,8 +1263,7 @@ static int alb_set_mac_address(struct bonding *bond, void *addr)
rollback_slave->dev->addr_len);
dev_set_mac_address(rollback_slave->dev,
(struct sockaddr *)&ss, NULL);
bond_hw_addr_copy(rollback_slave->dev->dev_addr, tmp_addr,
rollback_slave->dev->addr_len);
dev_addr_set(rollback_slave->dev, tmp_addr);
}
return res;
@@ -1729,8 +1728,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
dev_set_mac_address(new_slave->dev, (struct sockaddr *)&ss,
NULL);
bond_hw_addr_copy(new_slave->dev->dev_addr, tmp_addr,
new_slave->dev->addr_len);
dev_addr_set(new_slave->dev, tmp_addr);
}
/* curr_active_slave must be set before calling alb_swap_mac_addr */
@@ -1763,7 +1761,7 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
if (res)
return res;
bond_hw_addr_copy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len);
dev_addr_set(bond_dev, ss->__data);
/* If there is no curr_active_slave there is nothing else to do.
* Otherwise we'll need to pass the new address to it and handle
@@ -35,6 +35,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/filter.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
@@ -71,6 +72,7 @@
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_bonding.h>
#include <linux/phy.h>
#include <linux/jiffies.h>
#include <linux/preempt.h>
#include <net/route.h>
@@ -923,7 +925,7 @@ static int bond_set_dev_addr(struct net_device *bond_dev,
if (err)
return err;
memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
__dev_addr_set(bond_dev, slave_dev->dev_addr, slave_dev->addr_len);
bond_dev->addr_assign_type = NET_ADDR_STOLEN;
call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
return 0;
@@ -1460,7 +1462,7 @@ static void bond_compute_features(struct bonding *bond)
bond_dev->hw_enc_features |= xfrm_features;
#endif /* CONFIG_XFRM_OFFLOAD */
bond_dev->mpls_features = mpls_features;
bond_dev->gso_max_segs = gso_max_segs;
netif_set_gso_max_segs(bond_dev, gso_max_segs);
netif_set_gso_max_size(bond_dev, gso_max_size);
bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
@@ -3128,8 +3130,8 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
* when the source ip is 0, so don't take the link down
* if we don't know our ip yet
*/
if (!bond_time_in_interval(bond, trans_start, 2) ||
!bond_time_in_interval(bond, slave->last_rx, 2)) {
if (!bond_time_in_interval(bond, trans_start, bond->params.missed_max) ||
!bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
slave_state_changed = 1;
@@ -3223,7 +3225,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
/* Backup slave is down if:
* - No current_arp_slave AND
* - more than 3*delta since last receive AND
* - more than (missed_max+1)*delta since last receive AND
* - the bond has an IP address
*
* Note: a non-null current_arp_slave indicates
@@ -3235,20 +3237,20 @@ static int bond_ab_arp_inspect(struct bonding *bond)
*/
if (!bond_is_active_slave(slave) &&
!rcu_access_pointer(bond->current_arp_slave) &&
!bond_time_in_interval(bond, last_rx, 3)) {
!bond_time_in_interval(bond, last_rx, bond->params.missed_max + 1)) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
commit++;
}
/* Active slave is down if:
* - more than 2*delta since transmitting OR
* - (more than 2*delta since receive AND
* - more than missed_max*delta since transmitting OR
* - (more than missed_max*delta since receive AND
* the bond has an IP address)
*/
trans_start = dev_trans_start(slave->dev);
if (bond_is_active_slave(slave) &&
(!bond_time_in_interval(bond, trans_start, 2) ||
!bond_time_in_interval(bond, last_rx, 2))) {
(!bond_time_in_interval(bond, trans_start, bond->params.missed_max) ||
!bond_time_in_interval(bond, last_rx, bond->params.missed_max))) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
commit++;
}
@@ -4090,7 +4092,11 @@ static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cm
{
struct bonding *bond = netdev_priv(bond_dev);
struct mii_ioctl_data *mii = NULL;
int res;
const struct net_device_ops *ops;
struct net_device *real_dev;
struct hwtstamp_config cfg;
struct ifreq ifrr;
int res = 0;
netdev_dbg(bond_dev, "bond_eth_ioctl: cmd=%d\n", cmd);
@@ -4116,7 +4122,40 @@ static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cm
mii->val_out = BMSR_LSTATUS;
}
return 0;
break;
case SIOCSHWTSTAMP:
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
if (!(cfg.flags & HWTSTAMP_FLAG_BONDED_PHC_INDEX))
return -EOPNOTSUPP;
fallthrough;
case SIOCGHWTSTAMP:
real_dev = bond_option_active_slave_get_rcu(bond);
if (!real_dev)
return -EOPNOTSUPP;
strscpy_pad(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
ifrr.ifr_ifru = ifr->ifr_ifru;
ops = real_dev->netdev_ops;
if (netif_device_present(real_dev) && ops->ndo_eth_ioctl) {
res = ops->ndo_eth_ioctl(real_dev, &ifrr, cmd);
if (res)
return res;
ifr->ifr_ifru = ifrr.ifr_ifru;
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
/* Set the BOND_PHC_INDEX flag to notify user space */
cfg.flags |= HWTSTAMP_FLAG_BONDED_PHC_INDEX;
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ?
-EFAULT : 0;
}
fallthrough;
default:
res = -EOPNOTSUPP;
}
@@ -4413,7 +4452,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
}
/* success */
memcpy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len);
dev_addr_set(bond_dev, ss->__data);
return 0;
unwind:
@@ -5332,10 +5371,38 @@ static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
BOND_ABI_VERSION);
}
static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
struct ethtool_ts_info *info)
{
struct bonding *bond = netdev_priv(bond_dev);
const struct ethtool_ops *ops;
struct net_device *real_dev;
struct phy_device *phydev;
real_dev = bond_option_active_slave_get_rcu(bond);
if (real_dev) {
ops = real_dev->ethtool_ops;
phydev = real_dev->phydev;
if (phy_has_tsinfo(phydev)) {
return phy_ts_info(phydev, info);
} else if (ops->get_ts_info) {
return ops->get_ts_info(real_dev, info);
}
}
info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE;
info->phc_index = -1;
return 0;
}
static const struct ethtool_ops bond_ethtool_ops = {
.get_drvinfo = bond_ethtool_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ksettings = bond_ethtool_get_link_ksettings,
.get_ts_info = bond_ethtool_get_ts_info,
};
static const struct net_device_ops bond_netdev_ops = {
@@ -5835,6 +5902,7 @@ static int bond_check_params(struct bond_params *params)
params->arp_interval = arp_interval;
params->arp_validate = arp_validate_value;
params->arp_all_targets = arp_all_targets_value;
params->missed_max = 2;
params->updelay = updelay;
params->downdelay = downdelay;
params->peer_notif_delay = 0;
@@ -110,6 +110,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
.len = ETH_ALEN },
[IFLA_BOND_TLB_DYNAMIC_LB] = { .type = NLA_U8 },
[IFLA_BOND_PEER_NOTIF_DELAY] = { .type = NLA_U32 },
[IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 },
};
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
@@ -453,6 +454,15 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
return err;
}
if (data[IFLA_BOND_MISSED_MAX]) {
int missed_max = nla_get_u8(data[IFLA_BOND_MISSED_MAX]);
bond_opt_initval(&newval, missed_max);
err = __bond_opt_set(bond, BOND_OPT_MISSED_MAX, &newval);
if (err)
return err;
}
return 0;
}
@@ -515,6 +525,7 @@ static size_t bond_get_size(const struct net_device *bond_dev)
nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_ACTOR_SYSTEM */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_TLB_DYNAMIC_LB */
nla_total_size(sizeof(u32)) + /* IFLA_BOND_PEER_NOTIF_DELAY */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_MISSED_MAX */
0;
}
@@ -650,6 +661,10 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.tlb_dynamic_lb))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_BOND_MISSED_MAX,
bond->params.missed_max))
goto nla_put_failure;
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info info;
@@ -78,6 +78,8 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_ad_user_port_key_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_missed_max_set(struct bonding *bond,
const struct bond_opt_value *newval);
static const struct bond_opt_value bond_mode_tbl[] = {
@@ -213,6 +215,13 @@ static const struct bond_opt_value bond_ad_user_port_key_tbl[] = {
{ NULL, -1, 0},
};
static const struct bond_opt_value bond_missed_max_tbl[] = {
{ "minval", 1, BOND_VALFLAG_MIN},
{ "maxval", 255, BOND_VALFLAG_MAX},
{ "default", 2, BOND_VALFLAG_DEFAULT},
{ NULL, -1, 0},
};
static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_MODE] = {
.id = BOND_OPT_MODE,
@@ -270,6 +279,15 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.values = bond_intmax_tbl,
.set = bond_option_arp_interval_set
},
[BOND_OPT_MISSED_MAX] = {
.id = BOND_OPT_MISSED_MAX,
.name = "arp_missed_max",
.desc = "Maximum number of missed ARP interval",
.unsuppmodes = BIT(BOND_MODE_8023AD) | BIT(BOND_MODE_TLB) |
BIT(BOND_MODE_ALB),
.values = bond_missed_max_tbl,
.set = bond_option_missed_max_set
},
[BOND_OPT_ARP_TARGETS] = {
.id = BOND_OPT_ARP_TARGETS,
.name = "arp_ip_target",
@@ -1186,6 +1204,16 @@ static int bond_option_arp_all_targets_set(struct bonding *bond,
return 0;
}
static int bond_option_missed_max_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
netdev_dbg(bond->dev, "Setting missed max to %s (%llu)\n",
newval->string, newval->value);
bond->params.missed_max = newval->value;
return 0;
}
static int bond_option_primary_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
@@ -11,7 +11,7 @@
static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
struct bonding *bond = PDE_DATA(file_inode(seq->file));
struct bonding *bond = pde_data(file_inode(seq->file));
struct list_head *iter;
struct slave *slave;
loff_t off = 0;
@@ -30,7 +30,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct bonding *bond = PDE_DATA(file_inode(seq->file));
struct bonding *bond = pde_data(file_inode(seq->file));
struct list_head *iter;
struct slave *slave;
bool found = false;
@@ -57,7 +57,7 @@ static void bond_info_seq_stop(struct seq_file *seq, void *v)
static void bond_info_show_master(struct seq_file *seq)
{
struct bonding *bond = PDE_DATA(file_inode(seq->file));
struct bonding *bond = pde_data(file_inode(seq->file));
const struct bond_opt_value *optval;
struct slave *curr, *primary;
int i;
@@ -115,6 +115,8 @@ static void bond_info_show_master(struct seq_file *seq)
seq_printf(seq, "ARP Polling Interval (ms): %d\n",
bond->params.arp_interval);
seq_printf(seq, "ARP Missed Max: %u\n",
bond->params.missed_max);
seq_printf(seq, "ARP IP target/s (n.n.n.n form):");
@@ -173,7 +175,7 @@ static void bond_info_show_master(struct seq_file *seq)
static void bond_info_show_slave(struct seq_file *seq,
const struct slave *slave)
{
struct bonding *bond = PDE_DATA(file_inode(seq->file));
struct bonding *bond = pde_data(file_inode(seq->file));
seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
seq_printf(seq, "MII Status: %s\n", bond_slave_link_status(slave->link));
@@ -303,6 +303,18 @@ static ssize_t bonding_show_arp_targets(struct device *d,
static DEVICE_ATTR(arp_ip_target, 0644,
bonding_show_arp_targets, bonding_sysfs_store_option);
/* Show the arp missed max. */
static ssize_t bonding_show_missed_max(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
return sprintf(buf, "%u\n", bond->params.missed_max);
}
static DEVICE_ATTR(arp_missed_max, 0644,
bonding_show_missed_max, bonding_sysfs_store_option);
/* Show the up and down delays. */
static ssize_t bonding_show_downdelay(struct device *d,
struct device_attribute *attr,
@@ -779,6 +791,7 @@ static struct attribute *per_bond_attrs[] = {
&dev_attr_ad_actor_sys_prio.attr,
&dev_attr_ad_actor_system.attr,
&dev_attr_ad_user_port_key.attr,
&dev_attr_arp_missed_max.attr,
NULL,
};
@@ -811,8 +824,8 @@ int bond_create_sysfs(struct bond_net *bn)
*/
if (ret == -EEXIST) {
/* Is someone being kinky and naming a device bonding_master? */
if (__dev_get_by_name(bn->net,
class_attr_bonding_masters.attr.name))
if (netdev_name_in_use(bn->net,
class_attr_bonding_masters.attr.name))
pr_err("network device named %s already exists in sysfs\n",
class_attr_bonding_masters.attr.name);
ret = 0;
@@ -16,7 +16,7 @@ obj-y += softing/
obj-$(CONFIG_CAN_AT91) += at91_can.o
obj-$(CONFIG_CAN_CC770) += cc770/
obj-$(CONFIG_CAN_C_CAN) += c_can/
obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
obj-$(CONFIG_CAN_FLEXCAN) += flexcan/
obj-$(CONFIG_CAN_GRCAN) += grcan.o
obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
@@ -448,7 +448,6 @@ static void at91_chip_stop(struct net_device *dev, enum can_state state)
static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct at91_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
struct can_frame *cf = (struct can_frame *)skb->data;
unsigned int mb, prio;
u32 reg_mid, reg_mcr;
@@ -480,8 +479,6 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* This triggers transmission */
at91_write(priv, AT91_MCR(mb), reg_mcr);
stats->tx_bytes += cf->len;
/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv), 0);
@@ -553,8 +550,6 @@ static void at91_rx_overflow_err(struct net_device *dev)
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
stats->rx_packets++;
stats->rx_bytes += cf->len;
netif_receive_skb(skb);
}
@@ -619,7 +614,9 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
at91_read_mb(dev, mb, cf);
stats->rx_packets++;
stats->rx_bytes += cf->len;
if (!(cf->can_id & CAN_RTR_FLAG))
stats->rx_bytes += cf->len;
netif_receive_skb(skb);
can_led_event(dev, CAN_LED_EVENT_RX);
@@ -779,8 +776,6 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
at91_poll_err_frame(dev, cf, reg_sr);
dev->stats.rx_packets++;
dev->stats.rx_bytes += cf->len;
netif_receive_skb(skb);
return 1;
@@ -854,7 +849,10 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
if (likely(reg_msr & AT91_MSR_MRDY &&
~reg_msr & AT91_MSR_MABT)) {
/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
can_get_echo_skb(dev, mb - get_mb_tx_first(priv), NULL);
dev->stats.tx_bytes +=
can_get_echo_skb(dev,
mb - get_mb_tx_first(priv),
NULL);
dev->stats.tx_packets++;
can_led_event(dev, CAN_LED_EVENT_TX);
}
@@ -1037,8 +1035,6 @@ static void at91_irq_err(struct net_device *dev)
at91_irq_err_state(dev, cf, new_state);
dev->stats.rx_packets++;
dev->stats.rx_bytes += cf->len;
netif_rx(skb);
priv->can.state = new_state;
@@ -1170,9 +1166,9 @@ static ssize_t mb0_id_show(struct device *dev,
struct at91_priv *priv = netdev_priv(to_net_dev(dev));
if (priv->mb0_id & CAN_EFF_FLAG)
return snprintf(buf, PAGE_SIZE, "0x%08x\n", priv->mb0_id);
return sysfs_emit(buf, "0x%08x\n", priv->mb0_id);
else
return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id);
return sysfs_emit(buf, "0x%03x\n", priv->mb0_id);
}
static ssize_t mb0_id_store(struct device *dev,
@@ -211,7 +211,6 @@ struct c_can_priv {
struct c_can_raminit raminit_sys; /* RAMINIT via syscon regmap */
void (*raminit)(const struct c_can_priv *priv, bool enable);
u32 comm_rcv_high;
u32 dlc[];
};
struct net_device *alloc_c_can_dev(int msg_obj_num);
@@ -20,7 +20,9 @@ static void c_can_get_drvinfo(struct net_device *netdev,
}
static void c_can_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
struct c_can_priv *priv = netdev_priv(netdev);
@@ -403,10 +403,10 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
frame->data[i + 1] = data >> 8;
}
}
}
stats->rx_bytes += frame->len;
}
stats->rx_packets++;
stats->rx_bytes += frame->len;
netif_receive_skb(skb);
return 0;
@@ -477,7 +477,6 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
* transmit as we might race against do_tx().
*/
c_can_setup_tx_object(dev, IF_TX, frame, idx);
priv->dlc[idx] = frame->len;
can_put_echo_skb(skb, dev, idx, 0);
obj = idx + priv->msg_obj_tx_first;
c_can_object_put(dev, IF_TX, obj, cmd);
@@ -742,8 +741,7 @@ static void c_can_do_tx(struct net_device *dev)
* NAPI. We are not transmitting.
*/
c_can_inval_tx_object(dev, IF_NAPI, obj);
can_get_echo_skb(dev, idx, NULL);
bytes += priv->dlc[idx];
bytes += can_get_echo_skb(dev, idx, NULL);
pkts++;
}
@@ -920,7 +918,6 @@ static int c_can_handle_state_change(struct net_device *dev,
unsigned int reg_err_counter;
unsigned int rx_err_passive;
struct c_can_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
struct can_frame *cf;
struct sk_buff *skb;
struct can_berr_counter bec;
@@ -996,8 +993,6 @@ static int c_can_handle_state_change(struct net_device *dev,
break;
}
stats->rx_packets++;
stats->rx_bytes += cf->len;
netif_receive_skb(skb);
return 1;
@@ -1064,8 +1059,6 @@ static int c_can_handle_bus_err(struct net_device *dev,
break;
}
stats->rx_packets++;
stats->rx_bytes += cf->len;
netif_receive_skb(skb);
return 1;
}
@@ -1232,8 +1225,7 @@ struct net_device *alloc_c_can_dev(int msg_obj_num)
struct c_can_priv *priv;
int msg_obj_tx_num = msg_obj_num / 2;
dev = alloc_candev(struct_size(priv, dlc, msg_obj_tx_num),
msg_obj_tx_num);
dev = alloc_candev(sizeof(*priv), msg_obj_tx_num);
if (!dev)
return NULL;
@@ -489,17 +489,17 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1)
cf->len = can_cc_dlc2len((config & 0xf0) >> 4);
for (i = 0; i < cf->len; i++)
cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]);
}
stats->rx_bytes += cf->len;
}
stats->rx_packets++;
stats->rx_bytes += cf->len;
netif_rx(skb);
}
static int cc770_err(struct net_device *dev, u8 status)
{
struct cc770_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
struct can_frame *cf;
struct sk_buff *skb;
u8 lec;
@@ -571,8 +571,6 @@ static int cc770_err(struct net_device *dev, u8 status)
}
stats->rx_packets++;
stats->rx_bytes += cf->len;
netif_rx(skb);
return 0;
@@ -666,7 +664,6 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
struct cc770_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
unsigned int mo = obj2msgobj(o);
struct can_frame *cf;
u8 ctrl1;
ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
@@ -698,12 +695,9 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
return;
}
cf = (struct can_frame *)priv->tx_skb->data;
stats->tx_bytes += cf->len;
stats->tx_packets++;
can_put_echo_skb(priv->tx_skb, dev, 0, 0);
can_get_echo_skb(dev, 0, NULL);
stats->tx_bytes += can_get_echo_skb(dev, 0, NULL);
stats->tx_packets++;
priv->tx_skb = NULL;
netif_wake_queue(dev);
@@ -4,6 +4,7 @@
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
*/
#include <linux/units.h>
#include <linux/can/dev.h>
#ifdef CONFIG_CAN_CALC_BITTIMING
@@ -81,9 +82,9 @@ int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
if (bt->sample_point) {
sample_point_nominal = bt->sample_point;
} else {
if (bt->bitrate > 800 * CAN_KBPS)
if (bt->bitrate > 800 * KILO /* BPS */)
sample_point_nominal = 750;
else if (bt->bitrate > 500 * CAN_KBPS)
else if (bt->bitrate > 500 * KILO /* BPS */)
sample_point_nominal = 800;
else
sample_point_nominal = 875;
@@ -175,27 +176,29 @@ int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
return 0;
}
void can_calc_tdco(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
const struct can_bittiming *dbt = &priv->data_bittiming;
struct can_tdc *tdc = &priv->tdc;
const struct can_tdc_const *tdc_const = priv->tdc_const;
void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
const struct can_bittiming *dbt,
u32 *ctrlmode, u32 ctrlmode_supported)
if (!tdc_const)
{
if (!tdc_const || !(ctrlmode_supported & CAN_CTRLMODE_TDC_AUTO))
return;
*ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
/* As specified in ISO 11898-1 section 11.3.3 "Transmitter
* delay compensation" (TDC) is only applicable if data BRP is
* one or two.
*/
if (dbt->brp == 1 || dbt->brp == 2) {
/* Reuse "normal" sample point and convert it to time quanta */
u32 sample_point_in_tq = can_bit_time(dbt) * dbt->sample_point / 1000;
/* Sample point in clock periods */
u32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
dbt->phase_seg1) * dbt->brp;
tdc->tdco = min(sample_point_in_tq, tdc_const->tdco_max);
} else {
tdc->tdco = 0;
if (sample_point_in_tc < tdc_const->tdco_min)
return;
tdc->tdco = min(sample_point_in_tc, tdc_const->tdco_max);
*ctrlmode |= CAN_CTRLMODE_TDC_AUTO;
}
}
#endif /* CONFIG_CAN_CALC_BITTIMING */
@@ -136,7 +136,6 @@ EXPORT_SYMBOL_GPL(can_change_state);
static void can_restart(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
struct sk_buff *skb;
struct can_frame *cf;
int err;
@@ -155,9 +154,6 @@ static void can_restart(struct net_device *dev)
cf->can_id |= CAN_ERR_RESTARTED;
stats->rx_packets++;
stats->rx_bytes += cf->len;
netif_rx_ni(skb);
restart:
@@ -300,6 +296,7 @@ EXPORT_SYMBOL_GPL(free_candev);
int can_change_mtu(struct net_device *dev, int new_mtu)
{
struct can_priv *priv = netdev_priv(dev);
u32 ctrlmode_static = can_get_static_ctrlmode(priv);
/* Do not allow changing the MTU while running */
if (dev->flags & IFF_UP)
@@ -309,7 +306,7 @@ int can_change_mtu(struct net_device *dev, int new_mtu)
switch (new_mtu) {
case CAN_MTU:
/* 'CANFD-only' controllers can not switch to CAN_MTU */
if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
if (ctrlmode_static & CAN_CTRLMODE_FD)
return -EINVAL;
priv->ctrlmode &= ~CAN_CTRLMODE_FD;
@@ -318,7 +315,7 @@ int can_change_mtu(struct net_device *dev, int new_mtu)
case CANFD_MTU:
/* check for potential CANFD ability */
if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
!(priv->ctrlmode_static & CAN_CTRLMODE_FD))
!(ctrlmode_static & CAN_CTRLMODE_FD))
return -EINVAL;
priv->ctrlmode |= CAN_CTRLMODE_FD;
@@ -2,6 +2,7 @@
/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
 * Copyright (C) 2006 Andrey Volkov, Varma Electronics
 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
 * Copyright (C) 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
 */
#include <linux/can/dev.h>
@@ -19,6 +20,20 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
[IFLA_CAN_DATA_BITTIMING] = { .len = sizeof(struct can_bittiming) },
[IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
[IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
[IFLA_CAN_TDC] = { .type = NLA_NESTED },
[IFLA_CAN_CTRLMODE_EXT] = { .type = NLA_NESTED },
};
static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = {
[IFLA_CAN_TDC_TDCV_MIN] = { .type = NLA_U32 },
[IFLA_CAN_TDC_TDCV_MAX] = { .type = NLA_U32 },
[IFLA_CAN_TDC_TDCO_MIN] = { .type = NLA_U32 },
[IFLA_CAN_TDC_TDCO_MAX] = { .type = NLA_U32 },
[IFLA_CAN_TDC_TDCF_MIN] = { .type = NLA_U32 },
[IFLA_CAN_TDC_TDCF_MAX] = { .type = NLA_U32 },
[IFLA_CAN_TDC_TDCV] = { .type = NLA_U32 },
[IFLA_CAN_TDC_TDCO] = { .type = NLA_U32 },
[IFLA_CAN_TDC_TDCF] = { .type = NLA_U32 },
};
static int can_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -30,6 +45,7 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
* - nominal/arbitration bittiming
* - data bittiming
* - control mode with CAN_CTRLMODE_FD set
* - TDC parameters are coherent (details below)
*/
if (!data)
@@ -37,8 +53,43 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
u32 tdc_flags = cm->flags & CAN_CTRLMODE_TDC_MASK;
is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
/* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually exclusive */
if (tdc_flags == CAN_CTRLMODE_TDC_MASK)
return -EOPNOTSUPP;
/* If one of the CAN_CTRLMODE_TDC_* flag is set then
* TDC must be set and vice-versa
*/
if (!!tdc_flags != !!data[IFLA_CAN_TDC])
return -EOPNOTSUPP;
/* If providing TDC parameters, at least TDCO is
* needed. TDCV is needed if and only if
* CAN_CTRLMODE_TDC_MANUAL is set
*/
if (data[IFLA_CAN_TDC]) {
struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
int err;
err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX,
data[IFLA_CAN_TDC],
can_tdc_policy, extack);
if (err)
return err;
if (tb_tdc[IFLA_CAN_TDC_TDCV]) {
if (tdc_flags & CAN_CTRLMODE_TDC_AUTO)
return -EOPNOTSUPP;
} else {
if (tdc_flags & CAN_CTRLMODE_TDC_MANUAL)
return -EOPNOTSUPP;
}
if (!tb_tdc[IFLA_CAN_TDC_TDCO])
return -EOPNOTSUPP;
}
}
if (is_can_fd) {
@@ -46,7 +97,7 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
return -EOPNOTSUPP;
}
if (data[IFLA_CAN_DATA_BITTIMING]) {
if (data[IFLA_CAN_DATA_BITTIMING] || data[IFLA_CAN_TDC]) {
if (!is_can_fd)
return -EOPNOTSUPP;
}
@@ -54,11 +105,60 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla,
struct netlink_ext_ack *extack)
{
struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
struct can_tdc tdc = { 0 };
const struct can_tdc_const *tdc_const = priv->tdc_const;
int err;
if (!tdc_const || !can_tdc_is_enabled(priv))
return -EOPNOTSUPP;
err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX, nla,
can_tdc_policy, extack);
if (err)
return err;
if (tb_tdc[IFLA_CAN_TDC_TDCV]) {
u32 tdcv = nla_get_u32(tb_tdc[IFLA_CAN_TDC_TDCV]);
if (tdcv < tdc_const->tdcv_min || tdcv > tdc_const->tdcv_max)
return -EINVAL;
tdc.tdcv = tdcv;
}
if (tb_tdc[IFLA_CAN_TDC_TDCO]) {
u32 tdco = nla_get_u32(tb_tdc[IFLA_CAN_TDC_TDCO]);
if (tdco < tdc_const->tdco_min || tdco > tdc_const->tdco_max)
return -EINVAL;
tdc.tdco = tdco;
}
if (tb_tdc[IFLA_CAN_TDC_TDCF]) {
u32 tdcf = nla_get_u32(tb_tdc[IFLA_CAN_TDC_TDCF]);
if (tdcf < tdc_const->tdcf_min || tdcf > tdc_const->tdcf_max)
return -EINVAL;
tdc.tdcf = tdcf;
}
priv->tdc = tdc;
return 0;
}
static int can_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct can_priv *priv = netdev_priv(dev);
u32 tdc_mask = 0;
int err;
/* We need synchronization with dev->stop() */
@@ -112,7 +212,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
if (dev->flags & IFF_UP)
return -EBUSY;
cm = nla_data(data[IFLA_CAN_CTRLMODE]);
ctrlstatic = priv->ctrlmode_static;
ctrlstatic = can_get_static_ctrlmode(priv);
maskedflags = cm->flags & cm->mask;
/* check whether provided bits are allowed to be passed */
@@ -138,7 +238,16 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
dev->mtu = CAN_MTU;
memset(&priv->data_bittiming, 0,
sizeof(priv->data_bittiming));
priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
memset(&priv->tdc, 0, sizeof(priv->tdc));
}
tdc_mask = cm->mask & CAN_CTRLMODE_TDC_MASK;
/* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually
* exclusive: make sure to turn the other one off
*/
if (tdc_mask)
priv->ctrlmode &= cm->flags | ~CAN_CTRLMODE_TDC_MASK;
}
if (data[IFLA_CAN_RESTART_MS]) {
@@ -187,9 +296,26 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
return -EINVAL;
}
memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
memset(&priv->tdc, 0, sizeof(priv->tdc));
if (data[IFLA_CAN_TDC]) {
/* TDC parameters are provided: use them */
err = can_tdc_changelink(priv, data[IFLA_CAN_TDC],
extack);
if (err) {
priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
return err;
}
} else if (!tdc_mask) {
/* Neither of TDC parameters nor TDC flags are
* provided: do calculation
*/
can_calc_tdco(&priv->tdc, priv->tdc_const, &priv->data_bittiming,
&priv->ctrlmode, priv->ctrlmode_supported);
} /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly
* turned off. TDC is disabled: do nothing
*/
can_calc_tdco(dev);
memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
if (priv->do_set_data_bittiming) {
/* Finally, set the bit-timing registers */
@@ -226,6 +352,44 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
return 0;
}
static size_t can_tdc_get_size(const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
size_t size;
if (!priv->tdc_const)
return 0;
size = nla_total_size(0); /* nest IFLA_CAN_TDC */
if (priv->ctrlmode_supported & CAN_CTRLMODE_TDC_MANUAL) {
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MAX */
}
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MAX */
if (priv->tdc_const->tdcf_max) {
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MAX */
}
if (can_tdc_is_enabled(priv)) {
if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL ||
priv->do_get_auto_tdcv)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO */
if (priv->tdc_const->tdcf_max)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF */
}
return size;
}
static size_t can_ctrlmode_ext_get_size(void)
{
return nla_total_size(0) + /* nest IFLA_CAN_CTRLMODE_EXT */
nla_total_size(sizeof(u32)); /* IFLA_CAN_CTRLMODE_SUPPORTED */
}
static size_t can_get_size(const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
@@ -257,10 +421,84 @@ static size_t can_get_size(const struct net_device *dev)
size += nla_total_size(sizeof(*priv->data_bitrate_const) *
priv->data_bitrate_const_cnt);
size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */
size += can_tdc_get_size(dev); /* IFLA_CAN_TDC */
size += can_ctrlmode_ext_get_size(); /* IFLA_CAN_CTRLMODE_EXT */
return size;
}
static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct nlattr *nest;
struct can_priv *priv = netdev_priv(dev);
struct can_tdc *tdc = &priv->tdc;
const struct can_tdc_const *tdc_const = priv->tdc_const;
if (!tdc_const)
return 0;
nest = nla_nest_start(skb, IFLA_CAN_TDC);
if (!nest)
return -EMSGSIZE;
if (priv->ctrlmode_supported & CAN_CTRLMODE_TDC_MANUAL &&
(nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MIN, tdc_const->tdcv_min) ||
nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MAX, tdc_const->tdcv_max)))
goto err_cancel;
if (nla_put_u32(skb, IFLA_CAN_TDC_TDCO_MIN, tdc_const->tdco_min) ||
nla_put_u32(skb, IFLA_CAN_TDC_TDCO_MAX, tdc_const->tdco_max))
goto err_cancel;
if (tdc_const->tdcf_max &&
(nla_put_u32(skb, IFLA_CAN_TDC_TDCF_MIN, tdc_const->tdcf_min) ||
nla_put_u32(skb, IFLA_CAN_TDC_TDCF_MAX, tdc_const->tdcf_max)))
goto err_cancel;
if (can_tdc_is_enabled(priv)) {
u32 tdcv;
int err = -EINVAL;
if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL) {
tdcv = tdc->tdcv;
err = 0;
} else if (priv->do_get_auto_tdcv) {
err = priv->do_get_auto_tdcv(dev, &tdcv);
}
if (!err && nla_put_u32(skb, IFLA_CAN_TDC_TDCV, tdcv))
goto err_cancel;
if (nla_put_u32(skb, IFLA_CAN_TDC_TDCO, tdc->tdco))
goto err_cancel;
if (tdc_const->tdcf_max &&
nla_put_u32(skb, IFLA_CAN_TDC_TDCF, tdc->tdcf))
goto err_cancel;
}
nla_nest_end(skb, nest);
return 0;
err_cancel:
nla_nest_cancel(skb, nest);
return -EMSGSIZE;
}
static int can_ctrlmode_ext_fill_info(struct sk_buff *skb,
const struct can_priv *priv)
{
struct nlattr *nest;
nest = nla_nest_start(skb, IFLA_CAN_CTRLMODE_EXT);
if (!nest)
return -EMSGSIZE;
if (nla_put_u32(skb, IFLA_CAN_CTRLMODE_SUPPORTED,
priv->ctrlmode_supported)) {
nla_nest_cancel(skb, nest);
return -EMSGSIZE;
}
nla_nest_end(skb, nest);
return 0;
}
static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
@@ -318,7 +556,11 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
(nla_put(skb, IFLA_CAN_BITRATE_MAX,
sizeof(priv->bitrate_max),
&priv->bitrate_max))
&priv->bitrate_max)) ||
can_tdc_fill_info(skb, dev) ||
can_ctrlmode_ext_fill_info(skb, priv)
)
return -EMSGSIZE;
@@ -54,8 +54,11 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
struct can_frame *cf = (struct can_frame *)skb->data;
work_done++;
stats->rx_packets++;
stats->rx_bytes += cf->len;
if (!(cf->can_id & CAN_ERR_FLAG)) {
stats->rx_packets++;
if (!(cf->can_id & CAN_RTR_FLAG))
stats->rx_bytes += cf->len;
}
netif_receive_skb(skb);
}
@@ -255,7 +255,6 @@ struct grcan_priv {
struct grcan_dma dma;
struct sk_buff **echo_skb; /* We allocate this on our own */
u8 *txdlc; /* Length of queued frames */
/* The echo skb pointer, pointing into echo_skb and indicating which
* frames can be echoed back. See the "Notes on the tx cyclic buffer
@@ -515,9 +514,7 @@ static int catch_up_echo_skb(struct net_device *dev, int budget, bool echo)
if (echo) {
/* Normal echo of messages */
stats->tx_packets++;
stats->tx_bytes += priv->txdlc[i];
priv->txdlc[i] = 0;
can_get_echo_skb(dev, i, NULL);
stats->tx_bytes += can_get_echo_skb(dev, i, NULL);
} else {
/* For cleanup of untransmitted messages */
can_free_echo_skb(dev, i, NULL);
@@ -1062,16 +1059,10 @@ static int grcan_open(struct net_device *dev)
priv->can.echo_skb_max = dma->tx.size;
priv->can.echo_skb = priv->echo_skb;
priv->txdlc = kcalloc(dma->tx.size, sizeof(*priv->txdlc), GFP_KERNEL);
if (!priv->txdlc) {
err = -ENOMEM;
goto exit_free_echo_skb;
}
/* Get can device up */
err = open_candev(dev);
if (err)
goto exit_free_txdlc;
goto exit_free_echo_skb;
err = request_irq(dev->irq, grcan_interrupt, IRQF_SHARED,
dev->name, dev);
@@ -1093,8 +1084,6 @@ static int grcan_open(struct net_device *dev)
exit_close_candev:
close_candev(dev);
exit_free_txdlc:
kfree(priv->txdlc);
exit_free_echo_skb:
kfree(priv->echo_skb);
exit_free_dma_buffers:
@@ -1129,7 +1118,6 @@ static int grcan_close(struct net_device *dev)
priv->can.echo_skb_max = 0;
priv->can.echo_skb = NULL;
kfree(priv->echo_skb);
kfree(priv->txdlc);
return 0;
}
@@ -1211,11 +1199,11 @@ static int grcan_receive(struct net_device *dev, int budget)
shift = GRCAN_MSG_DATA_SHIFT(i);
cf->data[i] = (u8)(slot[j] >> shift);
}
}
/* Update statistics and read pointer */
stats->rx_bytes += cf->len;
}
stats->rx_packets++;
stats->rx_bytes += cf->len;
netif_receive_skb(skb);
rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size);
@@ -1447,7 +1435,6 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb,
* can_put_echo_skb would be an error unless other measures are
* taken.
*/
priv->txdlc[slotindex] = cf->len; /* Store dlc for statistics */
can_put_echo_skb(skb, dev, slotindex, 0);
/* Make sure everything is written before allowing hardware to
@@ -309,15 +309,15 @@ static void ifi_canfd_read_fifo(struct net_device *ndev)
*(u32 *)(cf->data + i) =
readl(priv->base + IFI_CANFD_RXFIFO_DATA + i);
}
stats->rx_bytes += cf->len;
}
stats->rx_packets++;
/* Remove the packet from FIFO */
writel(IFI_CANFD_RXSTCMD_REMOVE_MSG, priv->base + IFI_CANFD_RXSTCMD);
writel(rx_irq_mask, priv->base + IFI_CANFD_INTERRUPT);
stats->rx_packets++;
stats->rx_bytes += cf->len;
netif_receive_skb(skb);
}
@@ -430,8 +430,6 @@ static int ifi_canfd_handle_lec_err(struct net_device *ndev)
priv->base + IFI_CANFD_INTERRUPT);
writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR);
stats->rx_packets++;
stats->rx_bytes += cf->len;
netif_receive_skb(skb);
return 1;
@@ -456,7 +454,6 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
enum can_state new_state)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf;
struct sk_buff *skb;
struct can_berr_counter bec;
@@ -522,8 +519,6 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
break;
}
stats->rx_packets++;
stats->rx_bytes += cf->len;
netif_receive_skb(skb);
return 1;
@@ -1285,7 +1285,7 @@ static unsigned int ican3_get_echo_skb(struct ican3_dev *mod)
{
struct sk_buff *skb = skb_dequeue(&mod->echoq);
struct can_frame *cf;
u8 dlc;
u8 dlc = 0;
/* this should never trigger unless there is a driver bug */
if (!skb) {
@@ -1294,7 +1294,8 @@ static unsigned int ican3_get_echo_skb(struct ican3_dev *mod)
}
cf = (struct can_frame *)skb->data;
dlc = cf->len;
if (!(cf->can_id & CAN_RTR_FLAG))
dlc = cf->len;
/* check flag whether this packet has to be looped back */
if (skb->pkt_type != PACKET_LOOPBACK) {
@@ -1421,7 +1422,8 @@ static int ican3_recv_skb(struct ican3_dev *mod)
/* update statistics, receive the skb */
stats->rx_packets++;
stats->rx_bytes += cf->len;
if (!(cf->can_id & CAN_RTR_FLAG))
stats->rx_bytes += cf->len;
netif_receive_skb(skb);
err_noalloc:
@@ -1831,7 +1833,7 @@ static ssize_t termination_show(struct device *dev,
return -ETIMEDOUT;
}
return snprintf(buf, PAGE_SIZE, "%u\n", mod->termination_enabled);
return sysfs_emit(buf, "%u\n", mod->termination_enabled);
}
static ssize_t termination_store(struct device *dev,
@@ -1185,20 +1185,21 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
cf->len = can_fd_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT);
if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR)
if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) {
cf->can_id |= CAN_RTR_FLAG;
else
} else {
memcpy(cf->data, data, cf->len);
stats->rx_bytes += cf->len;
}
stats->rx_packets++;
shhwtstamps = skb_hwtstamps(skb);
shhwtstamps->hwtstamp =
ns_to_ktime(div_u64(p->timestamp * 1000,
pcie->freq_to_ticks_div));
stats->rx_bytes += cf->len;
stats->rx_packets++;
return netif_rx(skb);
}
@@ -1310,9 +1311,6 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr;
stats->rx_packets++;
stats->rx_bytes += cf->len;
netif_rx(skb);
return 0;
}
@@ -1510,8 +1508,6 @@ static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
if (skb) {
cf->can_id |= CAN_ERR_BUSERROR;
stats->rx_bytes += cf->len;
stats->rx_packets++;
netif_rx(skb);
} else {
stats->rx_dropped++;
@@ -524,14 +524,14 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
cf->data, DIV_ROUND_UP(cf->len, 4));
if (err)
goto out_free_skb;
stats->rx_bytes += cf->len;
}
stats->rx_packets++;
/* acknowledge rx fifo 0 */
m_can_write(cdev, M_CAN_RXF0A, fgi);
stats->rx_packets++;
stats->rx_bytes += cf->len;
timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc);
m_can_receive_skb(cdev, skb, timestamp);
@@ -653,9 +653,6 @@ static int m_can_handle_lec_err(struct net_device *dev,
break;
}
stats->rx_packets++;
stats->rx_bytes += cf->len;
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
@@ -712,7 +709,6 @@ static int m_can_handle_state_change(struct net_device *dev,
enum can_state new_state)
{
struct m_can_classdev *cdev = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
struct can_frame *cf;
struct sk_buff *skb;
struct can_berr_counter bec;
@@ -777,9 +773,6 @@ static int m_can_handle_state_change(struct net_device *dev,
break;
}
stats->rx_packets++;
stats->rx_bytes += cf->len;
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
@@ -1469,7 +1462,7 @@ static bool m_can_niso_supported(struct m_can_classdev *cdev)
static int m_can_dev_setup(struct m_can_classdev *cdev)
{
struct net_device *dev = cdev->net;
int m_can_version;
int m_can_version, err;
m_can_version = m_can_check_core_release(cdev);
/* return if unsupported version */
@@ -1499,7 +1492,9 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
switch (cdev->version) {
case 30:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */
can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
if (err)
return err;
cdev->can.bittiming_const = cdev->bit_timing ?
cdev->bit_timing : &m_can_bittiming_const_30X;
@@ -1509,7 +1504,9 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
break;
case 31:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
if (err)
return err;
cdev->can.bittiming_const = cdev->bit_timing ?
cdev->bit_timing : &m_can_bittiming_const_31X;
|
@ -293,10 +293,8 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
|
||||
return -EINVAL;
|
||||
|
||||
base = of_iomap(np, 0);
|
||||
if (!base) {
|
||||
dev_err(&ofdev->dev, "couldn't ioremap\n");
|
||||
return err;
|
||||
}
|
||||
if (!base)
|
||||
return dev_err_probe(&ofdev->dev, err, "couldn't ioremap\n");
|
||||
|
||||
irq = irq_of_parse_and_map(np, 0);
|
||||
if (!irq) {
|
||||
|
@ -401,13 +401,15 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota)
|
||||
continue;
|
||||
}
|
||||
|
||||
if (canrflg & MSCAN_RXF)
|
||||
if (canrflg & MSCAN_RXF) {
|
||||
mscan_get_rx_frame(dev, frame);
|
||||
else if (canrflg & MSCAN_ERR_IF)
|
||||
stats->rx_packets++;
|
||||
if (!(frame->can_id & CAN_RTR_FLAG))
|
||||
stats->rx_bytes += frame->len;
|
||||
} else if (canrflg & MSCAN_ERR_IF) {
|
||||
mscan_get_err_frame(dev, frame, canrflg);
|
||||
}
|
||||
|
||||
stats->rx_packets++;
|
||||
stats->rx_bytes += frame->len;
|
||||
work_done++;
|
||||
netif_receive_skb(skb);
|
||||
}
|
||||
@ -446,9 +448,9 @@ static irqreturn_t mscan_isr(int irq, void *dev_id)
|
||||
continue;
|
||||
|
||||
out_8(®s->cantbsel, mask);
|
||||
stats->tx_bytes += in_8(®s->tx.dlr);
|
||||
stats->tx_bytes += can_get_echo_skb(dev, entry->id,
|
||||
NULL);
|
||||
stats->tx_packets++;
|
||||
can_get_echo_skb(dev, entry->id, NULL);
|
||||
priv->tx_active &= ~mask;
|
||||
list_del(pos);
|
||||
}
|
||||
|
@ -561,9 +561,6 @@ static void pch_can_error(struct net_device *ndev, u32 status)
|
||||
|
||||
priv->can.state = state;
|
||||
netif_receive_skb(skb);
|
||||
|
||||
stats->rx_packets++;
|
||||
stats->rx_bytes += cf->len;
|
||||
}
|
||||
|
||||
static irqreturn_t pch_can_interrupt(int irq, void *dev_id)
|
||||
@ -680,22 +677,23 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota)
|
||||
cf->can_id = id;
|
||||
}
|
||||
|
||||
if (id2 & PCH_ID2_DIR)
|
||||
cf->can_id |= CAN_RTR_FLAG;
|
||||
|
||||
cf->len = can_cc_dlc2len((ioread32(&priv->regs->
|
||||
ifregs[0].mcont)) & 0xF);
|
||||
|
||||
for (i = 0; i < cf->len; i += 2) {
|
||||
data_reg = ioread16(&priv->regs->ifregs[0].data[i / 2]);
|
||||
cf->data[i] = data_reg;
|
||||
cf->data[i + 1] = data_reg >> 8;
|
||||
}
|
||||
if (id2 & PCH_ID2_DIR) {
|
||||
cf->can_id |= CAN_RTR_FLAG;
|
||||
} else {
|
||||
for (i = 0; i < cf->len; i += 2) {
|
||||
data_reg = ioread16(&priv->regs->ifregs[0].data[i / 2]);
|
||||
cf->data[i] = data_reg;
|
||||
cf->data[i + 1] = data_reg >> 8;
|
||||
}
|
||||
|
||||
rcv_pkts++;
|
||||
stats->rx_bytes += cf->len;
|
||||
}
|
||||
stats->rx_packets++;
|
||||
rcv_pkts++;
|
||||
quota--;
|
||||
stats->rx_bytes += cf->len;
|
||||
netif_receive_skb(skb);
|
||||
|
||||
pch_fifo_thresh(priv, obj_num);
|
||||
@ -709,16 +707,13 @@ static void pch_can_tx_complete(struct net_device *ndev, u32 int_stat)
|
||||
{
|
||||
struct pch_can_priv *priv = netdev_priv(ndev);
|
||||
struct net_device_stats *stats = &(priv->ndev->stats);
|
||||
u32 dlc;
|
||||
|
||||
can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1, NULL);
|
||||
stats->tx_bytes += can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1,
|
||||
NULL);
|
||||
stats->tx_packets++;
|
||||
iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND,
|
||||
&priv->regs->ifregs[1].cmask);
|
||||
pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, int_stat);
|
||||
dlc = can_cc_dlc2len(ioread32(&priv->regs->ifregs[1].mcont) &
|
||||
PCH_IF_MCONT_DLC);
|
||||
stats->tx_bytes += dlc;
|
||||
stats->tx_packets++;
|
||||
if (int_stat == PCH_TX_OBJ_END)
|
||||
netif_wake_queue(ndev);
|
||||
}
|
||||
|
@ -266,10 +266,9 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
unsigned long flags;

spin_lock_irqsave(&priv->echo_lock, flags);
can_get_echo_skb(priv->ndev, msg->client, NULL);

/* count bytes of the echo instead of skb */
stats->tx_bytes += cf_len;
stats->tx_bytes += can_get_echo_skb(priv->ndev, msg->client, NULL);
stats->tx_packets++;

/* restart tx queue (a slot is free) */
@ -310,12 +309,13 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
if (rx_msg_flags & PUCAN_MSG_EXT_ID)
cf->can_id |= CAN_EFF_FLAG;

if (rx_msg_flags & PUCAN_MSG_RTR)
if (rx_msg_flags & PUCAN_MSG_RTR) {
cf->can_id |= CAN_RTR_FLAG;
else
} else {
memcpy(cf->data, msg->d, cf->len);

stats->rx_bytes += cf->len;
stats->rx_bytes += cf->len;
}
stats->rx_packets++;

pucan_netif_rx(skb, msg->ts_low, msg->ts_high);
@ -409,8 +409,6 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
return -ENOMEM;
}

stats->rx_packets++;
stats->rx_bytes += cf->len;
pucan_netif_rx(skb, msg->ts_low, msg->ts_high);

return 0;
@ -438,8 +436,6 @@ static int pucan_handle_cache_critical(struct peak_canfd_priv *priv)
cf->data[6] = priv->bec.txerr;
cf->data[7] = priv->bec.rxerr;

stats->rx_bytes += cf->len;
stats->rx_packets++;
netif_rx(skb);

return 0;
@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config CAN_RCAR
tristate "Renesas R-Car and RZ/G CAN controller"
depends on ARCH_RENESAS || ARM || COMPILE_TEST
depends on ARCH_RENESAS || COMPILE_TEST
help
Say Y here if you want to use CAN controller found on Renesas R-Car
or RZ/G SoCs.
@ -11,7 +11,7 @@ config CAN_RCAR

config CAN_RCAR_CANFD
tristate "Renesas R-Car CAN FD controller"
depends on ARCH_RENESAS || ARM || COMPILE_TEST
depends on ARCH_RENESAS || COMPILE_TEST
help
Say Y here if you want to use CAN FD controller found on
Renesas R-Car SoCs. The driver puts the controller in CAN FD only
@ -94,7 +94,6 @@ struct rcar_can_priv {
|
||||
struct rcar_can_regs __iomem *regs;
|
||||
struct clk *clk;
|
||||
struct clk *can_clk;
|
||||
u8 tx_dlc[RCAR_CAN_FIFO_DEPTH];
|
||||
u32 tx_head;
|
||||
u32 tx_tail;
|
||||
u8 clock_select;
|
||||
@ -223,7 +222,6 @@ static void tx_failure_cleanup(struct net_device *ndev)
|
||||
static void rcar_can_error(struct net_device *ndev)
|
||||
{
|
||||
struct rcar_can_priv *priv = netdev_priv(ndev);
|
||||
struct net_device_stats *stats = &ndev->stats;
|
||||
struct can_frame *cf;
|
||||
struct sk_buff *skb;
|
||||
u8 eifr, txerr = 0, rxerr = 0;
|
||||
@ -362,11 +360,8 @@ static void rcar_can_error(struct net_device *ndev)
}
}

if (skb) {
stats->rx_packets++;
stats->rx_bytes += cf->len;
if (skb)
netif_rx(skb);
}
}

static void rcar_can_tx_done(struct net_device *ndev)
@ -383,10 +378,11 @@ static void rcar_can_tx_done(struct net_device *ndev)
if (priv->tx_head - priv->tx_tail <= unsent)
break;
stats->tx_packets++;
stats->tx_bytes += priv->tx_dlc[priv->tx_tail %
RCAR_CAN_FIFO_DEPTH];
priv->tx_dlc[priv->tx_tail % RCAR_CAN_FIFO_DEPTH] = 0;
can_get_echo_skb(ndev, priv->tx_tail % RCAR_CAN_FIFO_DEPTH, NULL);
stats->tx_bytes +=
can_get_echo_skb(ndev,
priv->tx_tail % RCAR_CAN_FIFO_DEPTH,
NULL);

priv->tx_tail++;
netif_wake_queue(ndev);
}
@ -616,7 +612,6 @@ static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
|
||||
|
||||
writeb(cf->len, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc);
|
||||
|
||||
priv->tx_dlc[priv->tx_head % RCAR_CAN_FIFO_DEPTH] = cf->len;
|
||||
can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH, 0);
|
||||
priv->tx_head++;
|
||||
/* Start Tx: write 0xff to the TFPCR register to increment
|
||||
@ -666,12 +661,13 @@ static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
|
||||
for (dlc = 0; dlc < cf->len; dlc++)
|
||||
cf->data[dlc] =
|
||||
readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].data[dlc]);
|
||||
|
||||
stats->rx_bytes += cf->len;
|
||||
}
|
||||
stats->rx_packets++;
|
||||
|
||||
can_led_event(priv->ndev, CAN_LED_EVENT_RX);
|
||||
|
||||
stats->rx_bytes += cf->len;
|
||||
stats->rx_packets++;
|
||||
netif_receive_skb(skb);
|
||||
}
|
||||
|
||||
|
@ -502,7 +502,6 @@ struct rcar_canfd_channel {
|
||||
struct rcar_canfd_global *gpriv; /* Controller reference */
|
||||
void __iomem *base; /* Register base address */
|
||||
struct napi_struct napi;
|
||||
u8 tx_len[RCANFD_FIFO_DEPTH]; /* For net stats */
|
||||
u32 tx_head; /* Incremented on xmit */
|
||||
u32 tx_tail; /* Incremented on xmit done */
|
||||
u32 channel; /* Channel number */
|
||||
@ -1033,8 +1032,6 @@ static void rcar_canfd_error(struct net_device *ndev, u32 cerfl,
|
||||
/* Clear channel error interrupts that are handled */
|
||||
rcar_canfd_write(priv->base, RCANFD_CERFL(ch),
|
||||
RCANFD_CERFL_ERR(~cerfl));
|
||||
stats->rx_packets++;
|
||||
stats->rx_bytes += cf->len;
|
||||
netif_rx(skb);
|
||||
}
|
||||
|
||||
@ -1051,9 +1048,7 @@ static void rcar_canfd_tx_done(struct net_device *ndev)
|
||||
|
||||
sent = priv->tx_tail % RCANFD_FIFO_DEPTH;
|
||||
stats->tx_packets++;
|
||||
stats->tx_bytes += priv->tx_len[sent];
|
||||
priv->tx_len[sent] = 0;
|
||||
can_get_echo_skb(ndev, sent, NULL);
|
||||
stats->tx_bytes += can_get_echo_skb(ndev, sent, NULL);
|
||||
|
||||
spin_lock_irqsave(&priv->tx_lock, flags);
|
||||
priv->tx_tail++;
|
||||
@ -1174,8 +1169,6 @@ static void rcar_canfd_state_change(struct net_device *ndev,
|
||||
rx_state = txerr <= rxerr ? state : 0;
|
||||
|
||||
can_change_state(ndev, cf, tx_state, rx_state);
|
||||
stats->rx_packets++;
|
||||
stats->rx_bytes += cf->len;
|
||||
netif_rx(skb);
|
||||
}
|
||||
}
|
||||
@ -1465,7 +1458,6 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
|
||||
RCANFD_C_CFDF(ch, RCANFD_CFFIFO_IDX, 0));
|
||||
}
|
||||
|
||||
priv->tx_len[priv->tx_head % RCANFD_FIFO_DEPTH] = cf->len;
|
||||
can_put_echo_skb(skb, ndev, priv->tx_head % RCANFD_FIFO_DEPTH, 0);
|
||||
|
||||
spin_lock_irqsave(&priv->tx_lock, flags);
|
||||
@ -1554,7 +1546,8 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)

can_led_event(priv->ndev, CAN_LED_EVENT_RX);

stats->rx_bytes += cf->len;
if (!(cf->can_id & CAN_RTR_FLAG))
stats->rx_bytes += cf->len;
stats->rx_packets++;
netif_receive_skb(skb);
}
@ -1705,7 +1698,9 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
|
||||
&rcar_canfd_data_bittiming_const;
|
||||
|
||||
/* Controller starts in CAN FD only mode */
|
||||
can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD);
|
||||
err = can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD);
|
||||
if (err)
|
||||
goto fail;
|
||||
priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
|
||||
} else {
|
||||
/* Controller starts in Classical CAN only mode */
|
||||
@ -1720,15 +1715,15 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
|
||||
|
||||
netif_napi_add(ndev, &priv->napi, rcar_canfd_rx_poll,
|
||||
RCANFD_NAPI_WEIGHT);
|
||||
spin_lock_init(&priv->tx_lock);
|
||||
devm_can_led_init(ndev);
|
||||
gpriv->ch[priv->channel] = priv;
|
||||
err = register_candev(ndev);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev,
|
||||
"register_candev() failed, error %d\n", err);
|
||||
goto fail_candev;
|
||||
}
|
||||
spin_lock_init(&priv->tx_lock);
|
||||
devm_can_led_init(ndev);
|
||||
gpriv->ch[priv->channel] = priv;
|
||||
dev_info(&pdev->dev, "device registered (channel %u)\n", priv->channel);
|
||||
return 0;
|
||||
|
||||
|
@ -372,15 +372,16 @@ static void sja1000_rx(struct net_device *dev)
} else {
for (i = 0; i < cf->len; i++)
cf->data[i] = priv->read_reg(priv, dreg++);

stats->rx_bytes += cf->len;
}
stats->rx_packets++;

cf->can_id = id;

/* release receive buffer */
sja1000_write_cmdreg(priv, CMD_RRB);

stats->rx_packets++;
stats->rx_bytes += cf->len;
netif_rx(skb);

can_led_event(dev, CAN_LED_EVENT_RX);
@ -487,8 +488,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
|
||||
can_bus_off(dev);
|
||||
}
|
||||
|
||||
stats->rx_packets++;
|
||||
stats->rx_bytes += cf->len;
|
||||
netif_rx(skb);
|
||||
|
||||
return 0;
|
||||
@ -528,10 +527,8 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
|
||||
can_free_echo_skb(dev, 0, NULL);
|
||||
} else {
|
||||
/* transmission complete */
|
||||
stats->tx_bytes +=
|
||||
priv->read_reg(priv, SJA1000_FI) & 0xf;
|
||||
stats->tx_bytes += can_get_echo_skb(dev, 0, NULL);
|
||||
stats->tx_packets++;
|
||||
can_get_echo_skb(dev, 0, NULL);
|
||||
}
|
||||
netif_wake_queue(dev);
|
||||
can_led_event(dev, CAN_LED_EVENT_TX);
|
||||
|
@ -17,7 +17,6 @@
|
||||
#include <linux/io.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_irq.h>
|
||||
|
||||
#include "sja1000.h"
|
||||
|
||||
@ -234,13 +233,15 @@ static int sp_probe(struct platform_device *pdev)
if (!addr)
return -ENOMEM;

if (of)
irq = irq_of_parse_and_map(of, 0);
else
if (of) {
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
} else {
res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);

if (!irq && !res_irq)
return -ENODEV;
if (!res_irq)
return -ENODEV;
}

of_id = of_match_device(sp_of_table, &pdev->dev);
if (of_id && of_id->data) {
@ -218,7 +218,9 @@ static void slc_bump(struct slcan *sl)
skb_put_data(skb, &cf, sizeof(struct can_frame));

sl->dev->stats.rx_packets++;
sl->dev->stats.rx_bytes += cf.len;
if (!(cf.can_id & CAN_RTR_FLAG))
sl->dev->stats.rx_bytes += cf.len;

netif_rx_ni(skb);
}

@ -288,6 +290,8 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
if (!(cf->can_id & CAN_RTR_FLAG)) {
for (i = 0; i < cf->len; i++)
pos = hex_byte_pack_upper(pos, cf->data[i]);

sl->dev->stats.tx_bytes += cf->len;
}

*pos++ = '\r';
@ -304,7 +308,6 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff);
sl->xleft = (pos - sl->xbuff) - actual;
sl->xhead = sl->xbuff + actual;
sl->dev->stats.tx_bytes += cf->len;
}

/* Write out any remaining transmit buffer. Scheduled when tty is writable */
@ -664,15 +667,14 @@ static void slcan_close(struct tty_struct *tty)
|
||||
/* This will complete via sl_free_netdev */
|
||||
}
|
||||
|
||||
static int slcan_hangup(struct tty_struct *tty)
|
||||
static void slcan_hangup(struct tty_struct *tty)
|
||||
{
|
||||
slcan_close(tty);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Perform I/O control on an active SLCAN channel. */
|
||||
static int slcan_ioctl(struct tty_struct *tty, struct file *file,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
static int slcan_ioctl(struct tty_struct *tty, unsigned int cmd,
|
||||
unsigned long arg)
|
||||
{
|
||||
struct slcan *sl = (struct slcan *) tty->disc_data;
|
||||
unsigned int tmp;
|
||||
@ -692,7 +694,7 @@ static int slcan_ioctl(struct tty_struct *tty, struct file *file,
|
||||
return -EINVAL;
|
||||
|
||||
default:
|
||||
return tty_mode_ioctl(tty, file, cmd, arg);
|
||||
return tty_mode_ioctl(tty, cmd, arg);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -282,7 +282,10 @@ static int softing_handle_1(struct softing *card)
skb = priv->can.echo_skb[priv->tx.echo_get];
if (skb)
skb->tstamp = ktime;
can_get_echo_skb(netdev, priv->tx.echo_get, NULL);
++netdev->stats.tx_packets;
netdev->stats.tx_bytes +=
can_get_echo_skb(netdev, priv->tx.echo_get,
NULL);
++priv->tx.echo_get;
if (priv->tx.echo_get >= TX_ECHO_SKB_MAX)
priv->tx.echo_get = 0;
@ -290,9 +293,6 @@ static int softing_handle_1(struct softing *card)
--priv->tx.pending;
if (card->tx.pending)
--card->tx.pending;
++netdev->stats.tx_packets;
if (!(msg.can_id & CAN_RTR_FLAG))
netdev->stats.tx_bytes += msg.len;
} else {
int ret;
@ -25,11 +25,11 @@
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mod_devicetable.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/property.h>
|
||||
#include <linux/regulator/consumer.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spi/spi.h>
|
||||
@ -153,7 +153,6 @@ struct hi3110_priv {
|
||||
u8 *spi_rx_buf;
|
||||
|
||||
struct sk_buff *tx_skb;
|
||||
int tx_len;
|
||||
|
||||
struct workqueue_struct *wq;
|
||||
struct work_struct tx_work;
|
||||
@ -166,6 +165,8 @@ struct hi3110_priv {
|
||||
#define HI3110_AFTER_SUSPEND_POWER 4
|
||||
#define HI3110_AFTER_SUSPEND_RESTART 8
|
||||
int restart_tx;
|
||||
bool tx_busy;
|
||||
|
||||
struct regulator *power;
|
||||
struct regulator *transceiver;
|
||||
struct clk *clk;
|
||||
@ -175,13 +176,13 @@ static void hi3110_clean(struct net_device *net)
|
||||
{
|
||||
struct hi3110_priv *priv = netdev_priv(net);
|
||||
|
||||
if (priv->tx_skb || priv->tx_len)
|
||||
if (priv->tx_skb || priv->tx_busy)
|
||||
net->stats.tx_errors++;
|
||||
dev_kfree_skb(priv->tx_skb);
|
||||
if (priv->tx_len)
|
||||
if (priv->tx_busy)
|
||||
can_free_echo_skb(priv->net, 0, NULL);
|
||||
priv->tx_skb = NULL;
|
||||
priv->tx_len = 0;
|
||||
priv->tx_busy = false;
|
||||
}
|
||||
|
||||
/* Note about handling of error return of hi3110_spi_trans: accessing
|
||||
@ -343,14 +344,15 @@ static void hi3110_hw_rx(struct spi_device *spi)
/* Data length */
frame->len = can_cc_dlc2len(buf[HI3110_FIFO_WOTIME_DLC_OFF] & 0x0F);

if (buf[HI3110_FIFO_WOTIME_ID_OFF + 3] & HI3110_FIFO_WOTIME_ID_RTR)
if (buf[HI3110_FIFO_WOTIME_ID_OFF + 3] & HI3110_FIFO_WOTIME_ID_RTR) {
frame->can_id |= CAN_RTR_FLAG;
else
} else {
memcpy(frame->data, buf + HI3110_FIFO_WOTIME_DAT_OFF,
frame->len);

priv->net->stats.rx_bytes += frame->len;
}
priv->net->stats.rx_packets++;
priv->net->stats.rx_bytes += frame->len;

can_led_event(priv->net, CAN_LED_EVENT_RX);
@ -368,7 +370,7 @@ static netdev_tx_t hi3110_hard_start_xmit(struct sk_buff *skb,
|
||||
struct hi3110_priv *priv = netdev_priv(net);
|
||||
struct spi_device *spi = priv->spi;
|
||||
|
||||
if (priv->tx_skb || priv->tx_len) {
|
||||
if (priv->tx_skb || priv->tx_busy) {
|
||||
dev_err(&spi->dev, "hard_xmit called while tx busy\n");
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
@ -585,7 +587,7 @@ static void hi3110_tx_work_handler(struct work_struct *ws)
|
||||
} else {
|
||||
frame = (struct can_frame *)priv->tx_skb->data;
|
||||
hi3110_hw_tx(spi, frame);
|
||||
priv->tx_len = 1 + frame->len;
|
||||
priv->tx_busy = true;
|
||||
can_put_echo_skb(priv->tx_skb, net, 0, 0);
|
||||
priv->tx_skb = NULL;
|
||||
}
|
||||
@ -720,14 +722,11 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
}
}

if (priv->tx_len && statf & HI3110_STAT_TXMTY) {
if (priv->tx_busy && statf & HI3110_STAT_TXMTY) {
net->stats.tx_packets++;
net->stats.tx_bytes += priv->tx_len - 1;
net->stats.tx_bytes += can_get_echo_skb(net, 0, NULL);
can_led_event(net, CAN_LED_EVENT_TX);
if (priv->tx_len) {
can_get_echo_skb(net, 0, NULL);
priv->tx_len = 0;
}
priv->tx_busy = false;
netif_wake_queue(net);
}
@ -754,7 +753,7 @@ static int hi3110_open(struct net_device *net)
|
||||
|
||||
priv->force_quit = 0;
|
||||
priv->tx_skb = NULL;
|
||||
priv->tx_len = 0;
|
||||
priv->tx_busy = false;
|
||||
|
||||
ret = request_threaded_irq(spi->irq, NULL, hi3110_can_ist,
|
||||
flags, DEVICE_NAME, priv);
|
||||
@ -828,19 +827,25 @@ MODULE_DEVICE_TABLE(spi, hi3110_id_table);
|
||||
|
||||
static int hi3110_can_probe(struct spi_device *spi)
|
||||
{
|
||||
const struct of_device_id *of_id = of_match_device(hi3110_of_match,
|
||||
&spi->dev);
|
||||
struct device *dev = &spi->dev;
|
||||
struct net_device *net;
|
||||
struct hi3110_priv *priv;
|
||||
const void *match;
|
||||
struct clk *clk;
|
||||
int freq, ret;
|
||||
u32 freq;
|
||||
int ret;
|
||||
|
||||
clk = devm_clk_get(&spi->dev, NULL);
|
||||
if (IS_ERR(clk)) {
|
||||
dev_err(&spi->dev, "no CAN clock source defined\n");
|
||||
return PTR_ERR(clk);
|
||||
clk = devm_clk_get_optional(&spi->dev, NULL);
|
||||
if (IS_ERR(clk))
|
||||
return dev_err_probe(dev, PTR_ERR(clk), "no CAN clock source defined\n");
|
||||
|
||||
if (clk) {
|
||||
freq = clk_get_rate(clk);
|
||||
} else {
|
||||
ret = device_property_read_u32(dev, "clock-frequency", &freq);
|
||||
if (ret)
|
||||
return dev_err_probe(dev, ret, "Failed to get clock-frequency!\n");
|
||||
}
|
||||
freq = clk_get_rate(clk);
|
||||
|
||||
/* Sanity check */
|
||||
if (freq > 40000000)
|
||||
@ -851,11 +856,9 @@ static int hi3110_can_probe(struct spi_device *spi)
|
||||
if (!net)
|
||||
return -ENOMEM;
|
||||
|
||||
if (!IS_ERR(clk)) {
|
||||
ret = clk_prepare_enable(clk);
|
||||
if (ret)
|
||||
goto out_free;
|
||||
}
|
||||
ret = clk_prepare_enable(clk);
|
||||
if (ret)
|
||||
goto out_free;
|
||||
|
||||
net->netdev_ops = &hi3110_netdev_ops;
|
||||
net->flags |= IFF_ECHO;
|
||||
@ -870,8 +873,9 @@ static int hi3110_can_probe(struct spi_device *spi)
|
||||
CAN_CTRLMODE_LISTENONLY |
|
||||
CAN_CTRLMODE_BERR_REPORTING;
|
||||
|
||||
if (of_id)
|
||||
priv->model = (enum hi3110_model)(uintptr_t)of_id->data;
|
||||
match = device_get_match_data(dev);
|
||||
if (match)
|
||||
priv->model = (enum hi3110_model)(uintptr_t)match;
|
||||
else
|
||||
priv->model = spi_get_device_id(spi)->driver_data;
|
||||
priv->net = net;
|
||||
@ -918,9 +922,7 @@ static int hi3110_can_probe(struct spi_device *spi)
|
||||
|
||||
ret = hi3110_hw_probe(spi);
|
||||
if (ret) {
|
||||
if (ret == -ENODEV)
|
||||
dev_err(&spi->dev, "Cannot initialize %x. Wrong wiring?\n",
|
||||
priv->model);
|
||||
dev_err_probe(dev, ret, "Cannot initialize %x. Wrong wiring?\n", priv->model);
|
||||
goto error_probe;
|
||||
}
|
||||
hi3110_hw_sleep(spi);
|
||||
@ -938,14 +940,12 @@ static int hi3110_can_probe(struct spi_device *spi)
|
||||
hi3110_power_enable(priv->power, 0);
|
||||
|
||||
out_clk:
|
||||
if (!IS_ERR(clk))
|
||||
clk_disable_unprepare(clk);
|
||||
clk_disable_unprepare(clk);
|
||||
|
||||
out_free:
|
||||
free_candev(net);
|
||||
|
||||
dev_err(&spi->dev, "Probe failed, err=%d\n", -ret);
|
||||
return ret;
|
||||
return dev_err_probe(dev, ret, "Probe failed\n");
|
||||
}
|
||||
|
||||
static int hi3110_can_remove(struct spi_device *spi)
|
||||
@ -957,8 +957,7 @@ static int hi3110_can_remove(struct spi_device *spi)
|
||||
|
||||
hi3110_power_enable(priv->power, 0);
|
||||
|
||||
if (!IS_ERR(priv->clk))
|
||||
clk_disable_unprepare(priv->clk);
|
||||
clk_disable_unprepare(priv->clk);
|
||||
|
||||
free_candev(net);
|
||||
|
||||
|
@ -237,7 +237,6 @@ struct mcp251x_priv {
|
||||
u8 *spi_rx_buf;
|
||||
|
||||
struct sk_buff *tx_skb;
|
||||
int tx_len;
|
||||
|
||||
struct workqueue_struct *wq;
|
||||
struct work_struct tx_work;
|
||||
@ -250,6 +249,8 @@ struct mcp251x_priv {
|
||||
#define AFTER_SUSPEND_POWER 4
|
||||
#define AFTER_SUSPEND_RESTART 8
|
||||
int restart_tx;
|
||||
bool tx_busy;
|
||||
|
||||
struct regulator *power;
|
||||
struct regulator *transceiver;
|
||||
struct clk *clk;
|
||||
@ -272,13 +273,13 @@ static void mcp251x_clean(struct net_device *net)
|
||||
{
|
||||
struct mcp251x_priv *priv = netdev_priv(net);
|
||||
|
||||
if (priv->tx_skb || priv->tx_len)
|
||||
if (priv->tx_skb || priv->tx_busy)
|
||||
net->stats.tx_errors++;
|
||||
dev_kfree_skb(priv->tx_skb);
|
||||
if (priv->tx_len)
|
||||
if (priv->tx_busy)
|
||||
can_free_echo_skb(priv->net, 0, NULL);
|
||||
priv->tx_skb = NULL;
|
||||
priv->tx_len = 0;
|
||||
priv->tx_busy = false;
|
||||
}
|
||||
|
||||
/* Note about handling of error return of mcp251x_spi_trans: accessing
|
||||
@ -600,9 +601,6 @@ static int mcp251x_gpio_setup(struct mcp251x_priv *priv)
|
||||
gpio->ngpio = ARRAY_SIZE(mcp251x_gpio_names);
|
||||
gpio->names = mcp251x_gpio_names;
|
||||
gpio->can_sleep = true;
|
||||
#ifdef CONFIG_OF_GPIO
|
||||
gpio->of_node = priv->spi->dev.of_node;
|
||||
#endif
|
||||
|
||||
return devm_gpiochip_add_data(&priv->spi->dev, gpio, priv);
|
||||
}
|
||||
@ -733,10 +731,12 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
}
/* Data length */
frame->len = can_cc_dlc2len(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
memcpy(frame->data, buf + RXBDAT_OFF, frame->len);
if (!(frame->can_id & CAN_RTR_FLAG)) {
memcpy(frame->data, buf + RXBDAT_OFF, frame->len);

priv->net->stats.rx_bytes += frame->len;
}
priv->net->stats.rx_packets++;
priv->net->stats.rx_bytes += frame->len;

can_led_event(priv->net, CAN_LED_EVENT_RX);
@ -786,7 +786,7 @@ static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
|
||||
struct mcp251x_priv *priv = netdev_priv(net);
|
||||
struct spi_device *spi = priv->spi;
|
||||
|
||||
if (priv->tx_skb || priv->tx_len) {
|
||||
if (priv->tx_skb || priv->tx_busy) {
|
||||
dev_warn(&spi->dev, "hard_xmit called while tx busy\n");
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
@ -1011,7 +1011,7 @@ static void mcp251x_tx_work_handler(struct work_struct *ws)
|
||||
if (frame->len > CAN_FRAME_MAX_DATA_LEN)
|
||||
frame->len = CAN_FRAME_MAX_DATA_LEN;
|
||||
mcp251x_hw_tx(spi, frame, 0);
|
||||
priv->tx_len = 1 + frame->len;
|
||||
priv->tx_busy = true;
|
||||
can_put_echo_skb(priv->tx_skb, net, 0, 0);
|
||||
priv->tx_skb = NULL;
|
||||
}
|
||||
@ -1177,12 +1177,12 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
|
||||
break;
|
||||
|
||||
if (intf & CANINTF_TX) {
|
||||
net->stats.tx_packets++;
|
||||
net->stats.tx_bytes += priv->tx_len - 1;
|
||||
can_led_event(net, CAN_LED_EVENT_TX);
|
||||
if (priv->tx_len) {
|
||||
can_get_echo_skb(net, 0, NULL);
|
||||
priv->tx_len = 0;
|
||||
if (priv->tx_busy) {
|
||||
net->stats.tx_packets++;
|
||||
net->stats.tx_bytes += can_get_echo_skb(net, 0,
|
||||
NULL);
|
||||
priv->tx_busy = false;
|
||||
}
|
||||
netif_wake_queue(net);
|
||||
}
|
||||
@ -1209,7 +1209,7 @@ static int mcp251x_open(struct net_device *net)
|
||||
|
||||
priv->force_quit = 0;
|
||||
priv->tx_skb = NULL;
|
||||
priv->tx_len = 0;
|
||||
priv->tx_busy = false;
|
||||
|
||||
if (!dev_fwnode(&spi->dev))
|
||||
flags = IRQF_TRIGGER_FALLING;
|
||||
|
@ -3,9 +3,14 @@
obj-$(CONFIG_CAN_MCP251XFD) += mcp251xfd.o

mcp251xfd-objs :=
mcp251xfd-objs += mcp251xfd-chip-fifo.o
mcp251xfd-objs += mcp251xfd-core.o
mcp251xfd-objs += mcp251xfd-crc16.o
mcp251xfd-objs += mcp251xfd-regmap.o
mcp251xfd-objs += mcp251xfd-ring.o
mcp251xfd-objs += mcp251xfd-rx.o
mcp251xfd-objs += mcp251xfd-tef.o
mcp251xfd-objs += mcp251xfd-timestamp.o
mcp251xfd-objs += mcp251xfd-tx.o

mcp251xfd-$(CONFIG_DEV_COREDUMP) += mcp251xfd-dump.o
File diff suppressed because it is too large
@ -250,7 +250,6 @@ mcp251xfd_regmap_crc_read_check_crc(const struct mcp251xfd_map_buf_crc * const b
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
mcp251xfd_regmap_crc_read_one(struct mcp251xfd_priv *priv,
|
||||
struct spi_message *msg, unsigned int data_len)
|
||||
|
@ -10,6 +10,7 @@
|
||||
#ifndef _MCP251XFD_H
|
||||
#define _MCP251XFD_H
|
||||
|
||||
#include <linux/bitfield.h>
|
||||
#include <linux/can/core.h>
|
||||
#include <linux/can/dev.h>
|
||||
#include <linux/can/rx-offload.h>
|
||||
@ -625,6 +626,12 @@ MCP251XFD_IS(2517);
|
||||
MCP251XFD_IS(2518);
|
||||
MCP251XFD_IS(251X);
|
||||
|
||||
static inline bool mcp251xfd_is_fd_mode(const struct mcp251xfd_priv *priv)
|
||||
{
|
||||
/* listen-only mode works like FD mode */
|
||||
return priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD);
|
||||
}
|
||||
|
||||
static inline u8 mcp251xfd_first_byte_set(u32 mask)
|
||||
{
|
||||
return (mask & 0x0000ffff) ?
|
||||
@ -761,6 +768,24 @@ mcp251xfd_get_rx_obj_addr(const struct mcp251xfd_rx_ring *ring, u8 n)
|
||||
return ring->base + ring->obj_size * n;
|
||||
}
|
||||
|
||||
static inline int
|
||||
mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
|
||||
u8 *tx_tail)
|
||||
{
|
||||
u32 fifo_sta;
|
||||
int err;
|
||||
|
||||
err = regmap_read(priv->map_reg,
|
||||
MCP251XFD_REG_FIFOSTA(MCP251XFD_TX_FIFO),
|
||||
&fifo_sta);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
*tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline u8 mcp251xfd_get_tef_head(const struct mcp251xfd_priv *priv)
|
||||
{
|
||||
return priv->tef->head & (priv->tx->obj_num - 1);
|
||||
@ -849,15 +874,24 @@ mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring)
|
||||
(n) < (priv)->rx_ring_num; \
|
||||
(n)++, (ring) = *((priv)->rx + (n)))
|
||||
|
||||
int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv);
|
||||
int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv);
|
||||
u16 mcp251xfd_crc16_compute2(const void *cmd, size_t cmd_size,
|
||||
const void *data, size_t data_size);
|
||||
u16 mcp251xfd_crc16_compute(const void *data, size_t data_size);
|
||||
int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv);
|
||||
void mcp251xfd_ring_init(struct mcp251xfd_priv *priv);
|
||||
void mcp251xfd_ring_free(struct mcp251xfd_priv *priv);
|
||||
int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv);
|
||||
int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv);
|
||||
int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv);
|
||||
void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv,
|
||||
struct sk_buff *skb, u32 timestamp);
|
||||
void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv);
|
||||
void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv);
|
||||
|
||||
netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
|
||||
struct net_device *ndev);
|
||||
|
||||
#if IS_ENABLED(CONFIG_DEV_COREDUMP)
|
||||
void mcp251xfd_dump(const struct mcp251xfd_priv *priv);
|
||||
#else
|
||||
|
@ -61,6 +61,7 @@
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/reset.h>
|
||||
|
||||
#define DRV_NAME "sun4i_can"
|
||||
|
||||
@ -200,10 +201,20 @@
|
||||
#define SUN4I_CAN_MAX_IRQ 20
|
||||
#define SUN4I_MODE_MAX_RETRIES 100
|
||||
|
||||
/**
|
||||
* struct sun4ican_quirks - Differences between SoC variants.
|
||||
*
|
||||
* @has_reset: SoC needs reset deasserted.
|
||||
*/
|
||||
struct sun4ican_quirks {
|
||||
bool has_reset;
|
||||
};
|
||||
|
||||
struct sun4ican_priv {
|
||||
struct can_priv can;
|
||||
void __iomem *base;
|
||||
struct clk *clk;
|
||||
struct reset_control *reset;
|
||||
spinlock_t cmdreg_lock; /* lock for concurrent cmd register writes */
|
||||
};
|
||||
|
||||
@ -490,18 +501,20 @@ static void sun4i_can_rx(struct net_device *dev)
}

/* remote frame ? */
if (fi & SUN4I_MSG_RTR_FLAG)
if (fi & SUN4I_MSG_RTR_FLAG) {
id |= CAN_RTR_FLAG;
else
} else {
for (i = 0; i < cf->len; i++)
cf->data[i] = readl(priv->base + dreg + i * 4);

stats->rx_bytes += cf->len;
}
stats->rx_packets++;

cf->can_id = id;

sun4i_can_write_cmdreg(priv, SUN4I_CMD_RELEASE_RBUF);

stats->rx_packets++;
stats->rx_bytes += cf->len;
netif_rx(skb);

can_led_event(dev, CAN_LED_EVENT_RX);
@ -622,13 +635,10 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
|
||||
can_bus_off(dev);
|
||||
}
|
||||
|
||||
if (likely(skb)) {
|
||||
stats->rx_packets++;
|
||||
stats->rx_bytes += cf->len;
|
||||
if (likely(skb))
|
||||
netif_rx(skb);
|
||||
} else {
|
||||
else
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -651,11 +661,8 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
|
||||
|
||||
if (isrc & SUN4I_INT_TBUF_VLD) {
|
||||
/* transmission complete interrupt */
|
||||
stats->tx_bytes +=
|
||||
readl(priv->base +
|
||||
SUN4I_REG_RBUF_RBACK_START_ADDR) & 0xf;
|
||||
stats->tx_bytes += can_get_echo_skb(dev, 0, NULL);
|
||||
stats->tx_packets++;
|
||||
can_get_echo_skb(dev, 0, NULL);
|
||||
netif_wake_queue(dev);
|
||||
can_led_event(dev, CAN_LED_EVENT_TX);
|
||||
}
|
||||
@ -702,6 +709,13 @@ static int sun4ican_open(struct net_device *dev)
|
||||
goto exit_irq;
|
||||
}
|
||||
|
||||
/* software reset deassert */
|
||||
err = reset_control_deassert(priv->reset);
|
||||
if (err) {
|
||||
netdev_err(dev, "could not deassert CAN reset\n");
|
||||
goto exit_soft_reset;
|
||||
}
|
||||
|
||||
/* turn on clocking for CAN peripheral block */
|
||||
err = clk_prepare_enable(priv->clk);
|
||||
if (err) {
|
||||
@ -723,6 +737,8 @@ static int sun4ican_open(struct net_device *dev)
|
||||
exit_can_start:
|
||||
clk_disable_unprepare(priv->clk);
|
||||
exit_clock:
|
||||
reset_control_assert(priv->reset);
|
||||
exit_soft_reset:
|
||||
free_irq(dev->irq, dev);
|
||||
exit_irq:
|
||||
close_candev(dev);
|
||||
@ -736,6 +752,7 @@ static int sun4ican_close(struct net_device *dev)
|
||||
netif_stop_queue(dev);
|
||||
sun4i_can_stop(dev);
|
||||
clk_disable_unprepare(priv->clk);
|
||||
reset_control_assert(priv->reset);
|
||||
|
||||
free_irq(dev->irq, dev);
|
||||
close_candev(dev);
|
||||
@ -750,9 +767,27 @@ static const struct net_device_ops sun4ican_netdev_ops = {
|
||||
.ndo_start_xmit = sun4ican_start_xmit,
|
||||
};
|
||||
|
||||
static const struct sun4ican_quirks sun4ican_quirks_a10 = {
|
||||
.has_reset = false,
|
||||
};
|
||||
|
||||
static const struct sun4ican_quirks sun4ican_quirks_r40 = {
|
||||
.has_reset = true,
|
||||
};
|
||||
|
||||
static const struct of_device_id sun4ican_of_match[] = {
|
||||
{.compatible = "allwinner,sun4i-a10-can"},
|
||||
{},
|
||||
{
|
||||
.compatible = "allwinner,sun4i-a10-can",
|
||||
.data = &sun4ican_quirks_a10
|
||||
}, {
|
||||
.compatible = "allwinner,sun7i-a20-can",
|
||||
.data = &sun4ican_quirks_a10
|
||||
}, {
|
||||
.compatible = "allwinner,sun8i-r40-can",
|
||||
.data = &sun4ican_quirks_r40
|
||||
}, {
|
||||
/* sentinel */
|
||||
},
|
||||
};
|
||||
|
||||
MODULE_DEVICE_TABLE(of, sun4ican_of_match);
|
||||
@ -771,10 +806,28 @@ static int sun4ican_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device_node *np = pdev->dev.of_node;
|
||||
struct clk *clk;
|
||||
struct reset_control *reset = NULL;
|
||||
void __iomem *addr;
|
||||
int err, irq;
|
||||
struct net_device *dev;
|
||||
struct sun4ican_priv *priv;
|
||||
const struct sun4ican_quirks *quirks;
|
||||
|
||||
quirks = of_device_get_match_data(&pdev->dev);
|
||||
if (!quirks) {
|
||||
dev_err(&pdev->dev, "failed to determine the quirks to use\n");
|
||||
err = -ENODEV;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (quirks->has_reset) {
|
||||
reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
|
||||
if (IS_ERR(reset)) {
|
||||
dev_err(&pdev->dev, "unable to request reset\n");
|
||||
err = PTR_ERR(reset);
|
||||
goto exit;
|
||||
}
|
||||
}
|
||||
|
||||
clk = of_clk_get(np, 0);
|
||||
if (IS_ERR(clk)) {
|
||||
@ -818,6 +871,7 @@ static int sun4ican_probe(struct platform_device *pdev)
|
||||
CAN_CTRLMODE_3_SAMPLES;
|
||||
priv->base = addr;
|
||||
priv->clk = clk;
|
||||
priv->reset = reset;
|
||||
spin_lock_init(&priv->cmdreg_lock);
|
||||
|
||||
platform_set_drvdata(pdev, dev);
|
||||
|
@ -859,7 +859,6 @@ static int ti_hecc_probe(struct platform_device *pdev)
|
||||
struct net_device *ndev = (struct net_device *)0;
|
||||
struct ti_hecc_priv *priv;
|
||||
struct device_node *np = pdev->dev.of_node;
|
||||
struct resource *irq;
|
||||
struct regulator *reg_xceiver;
|
||||
int err = -ENODEV;
|
||||
|
||||
@ -904,9 +903,9 @@ static int ti_hecc_probe(struct platform_device *pdev)
goto probe_exit_candev;
}

irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq) {
dev_err(&pdev->dev, "No irq resource\n");
ndev->irq = platform_get_irq(pdev, 0);
if (ndev->irq < 0) {
err = ndev->irq;
goto probe_exit_candev;
}

@ -920,7 +919,6 @@ static int ti_hecc_probe(struct platform_device *pdev)
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;

spin_lock_init(&priv->mbx_lock);
ndev->irq = irq->start;
ndev->flags |= IFF_ECHO;
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
@ -230,7 +230,6 @@ struct ems_tx_urb_context {
|
||||
struct ems_usb *dev;
|
||||
|
||||
u32 echo_index;
|
||||
u8 dlc;
|
||||
};
|
||||
|
||||
struct ems_usb {
|
||||
@ -320,10 +319,11 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
} else {
for (i = 0; i < cf->len; i++)
cf->data[i] = msg->msg.can_msg.msg[i];
}

stats->rx_bytes += cf->len;
}
stats->rx_packets++;
stats->rx_bytes += cf->len;

netif_rx(skb);
}
@ -397,8 +397,6 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
|
||||
stats->rx_errors++;
|
||||
}
|
||||
|
||||
stats->rx_packets++;
|
||||
stats->rx_bytes += cf->len;
|
||||
netif_rx(skb);
|
||||
}
|
||||
|
||||
@ -518,9 +516,8 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
|
||||
|
||||
/* transmission complete interrupt */
|
||||
netdev->stats.tx_packets++;
|
||||
netdev->stats.tx_bytes += context->dlc;
|
||||
|
||||
can_get_echo_skb(netdev, context->echo_index, NULL);
|
||||
netdev->stats.tx_bytes += can_get_echo_skb(netdev, context->echo_index,
|
||||
NULL);
|
||||
|
||||
/* Release context */
|
||||
context->echo_index = MAX_TX_URBS;
|
||||
@ -806,7 +803,6 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
|
||||
|
||||
context->dev = dev;
|
||||
context->echo_index = i;
|
||||
context->dlc = cf->len;
|
||||
|
||||
usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf,
|
||||
size, ems_usb_write_bulk_callback, context);
|
||||
|
@ -183,7 +183,6 @@ struct esd_usb2_net_priv;
|
||||
struct esd_tx_urb_context {
|
||||
struct esd_usb2_net_priv *priv;
|
||||
u32 echo_index;
|
||||
int len; /* CAN payload length */
|
||||
};
|
||||
|
||||
struct esd_usb2 {
|
||||
@ -293,8 +292,6 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
|
||||
priv->bec.txerr = txerr;
|
||||
priv->bec.rxerr = rxerr;
|
||||
|
||||
stats->rx_packets++;
|
||||
stats->rx_bytes += cf->len;
|
||||
netif_rx(skb);
|
||||
}
|
||||
}
|
||||
@ -334,10 +331,11 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
|
||||
} else {
|
||||
for (i = 0; i < cf->len; i++)
|
||||
cf->data[i] = msg->msg.rx.data[i];
|
||||
}
|
||||
|
||||
stats->rx_bytes += cf->len;
|
||||
}
|
||||
stats->rx_packets++;
|
||||
stats->rx_bytes += cf->len;
|
||||
|
||||
netif_rx(skb);
|
||||
}
|
||||
|
||||
@ -358,8 +356,8 @@ static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv,

if (!msg->msg.txdone.status) {
stats->tx_packets++;
stats->tx_bytes += context->len;
can_get_echo_skb(netdev, context->echo_index, NULL);
stats->tx_bytes += can_get_echo_skb(netdev, context->echo_index,
NULL);
} else {
stats->tx_errors++;
can_free_echo_skb(netdev, context->echo_index, NULL);
@ -784,7 +782,6 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
|
||||
|
||||
context->priv = priv;
|
||||
context->echo_index = i;
|
||||
context->len = cf->len;
|
||||
|
||||
/* hnd must not be 0 - MSB is stripped in txdone handling */
|
||||
msg->msg.tx.hnd = 0x80000000 | i; /* returned in TX done message */
|
||||
|
@ -10,6 +10,7 @@
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/units.h>
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
#include "es58x_core.h"
|
||||
@ -469,8 +470,8 @@ const struct es58x_parameters es581_4_param = {
|
||||
.bittiming_const = &es581_4_bittiming_const,
|
||||
.data_bittiming_const = NULL,
|
||||
.tdc_const = NULL,
|
||||
.bitrate_max = 1 * CAN_MBPS,
|
||||
.clock = {.freq = 50 * CAN_MHZ},
|
||||
.bitrate_max = 1 * MEGA /* BPS */,
|
||||
.clock = {.freq = 50 * MEGA /* Hz */},
|
||||
.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC,
|
||||
.tx_start_of_frame = 0xAFAF,
|
||||
.rx_start_of_frame = 0xFAFA,
|
||||
|
@ -192,7 +192,7 @@ struct es581_4_urb_cmd {
|
||||
struct es581_4_rx_cmd_ret rx_cmd_ret;
|
||||
__le64 timestamp;
|
||||
u8 rx_cmd_ret_u8;
|
||||
u8 raw_msg[0];
|
||||
DECLARE_FLEX_ARRAY(u8, raw_msg);
|
||||
} __packed;
|
||||
|
||||
__le16 reserved_for_crc16_do_not_use;
|
||||
|
@ -849,13 +849,6 @@ int es58x_rx_err_msg(struct net_device *netdev, enum es58x_err error,
|
||||
break;
|
||||
}
|
||||
|
||||
/* driver/net/can/dev.c:can_restart() takes in account error
|
||||
* messages in the RX stats. Doing the same here for
|
||||
* consistency.
|
||||
*/
|
||||
netdev->stats.rx_packets++;
|
||||
netdev->stats.rx_bytes += CAN_ERR_DLC;
|
||||
|
||||
if (cf) {
|
||||
if (cf->data[1])
|
||||
cf->can_id |= CAN_ERR_CRTL;
|
||||
@ -2096,6 +2089,7 @@ static int es58x_init_netdev(struct es58x_device *es58x_dev, int channel_idx)
|
||||
|
||||
netdev->netdev_ops = &es58x_netdev_ops;
|
||||
netdev->flags |= IFF_ECHO; /* We support local echo */
|
||||
netdev->dev_port = channel_idx;
|
||||
|
||||
ret = register_candev(netdev);
|
||||
if (ret)
|
||||
|
@ -12,6 +12,7 @@
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/units.h>
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
#include "es58x_core.h"
|
||||
@ -428,7 +429,7 @@ static int es58x_fd_enable_channel(struct es58x_priv *priv)
|
||||
es58x_fd_convert_bittiming(&tx_conf_msg.data_bittiming,
|
||||
&priv->can.data_bittiming);
|
||||
|
||||
if (priv->can.tdc.tdco) {
|
||||
if (can_tdc_is_enabled(&priv->can)) {
|
||||
tx_conf_msg.tdc_enabled = 1;
|
||||
tx_conf_msg.tdco = cpu_to_le16(priv->can.tdc.tdco);
|
||||
tx_conf_msg.tdcf = cpu_to_le16(priv->can.tdc.tdcf);
|
||||
@ -505,8 +506,11 @@ static const struct can_bittiming_const es58x_fd_data_bittiming_const = {
|
||||
* Register" from Microchip.
|
||||
*/
|
||||
static const struct can_tdc_const es58x_tdc_const = {
|
||||
.tdcv_min = 0,
|
||||
.tdcv_max = 0, /* Manual mode not supported. */
|
||||
.tdco_min = 0,
|
||||
.tdco_max = 127,
|
||||
.tdcf_min = 0,
|
||||
.tdcf_max = 127
|
||||
};
|
||||
|
||||
@ -519,11 +523,11 @@ const struct es58x_parameters es58x_fd_param = {
|
||||
* Mbps work in an optimal environment but are not recommended
|
||||
* for production environment.
|
||||
*/
|
||||
.bitrate_max = 8 * CAN_MBPS,
|
||||
.clock = {.freq = 80 * CAN_MHZ},
|
||||
.bitrate_max = 8 * MEGA /* BPS */,
|
||||
.clock = {.freq = 80 * MEGA /* Hz */},
|
||||
.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY |
|
||||
CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
|
||||
CAN_CTRLMODE_CC_LEN8_DLC,
|
||||
CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO,
|
||||
.tx_start_of_frame = 0xCEFA, /* FACE in little endian */
|
||||
.rx_start_of_frame = 0xFECA, /* CAFE in little endian */
|
||||
.tx_urb_cmd_max_len = ES58X_FD_TX_URB_CMD_MAX_LEN,
|
||||
|
@ -219,7 +219,7 @@ struct es58x_fd_urb_cmd {
|
||||
struct es58x_fd_tx_ack_msg tx_ack_msg;
|
||||
__le64 timestamp;
|
||||
__le32 rx_cmd_ret_le32;
|
||||
u8 raw_msg[0];
|
||||
DECLARE_FLEX_ARRAY(u8, raw_msg);
|
||||
} __packed;
|
||||
|
||||
__le16 reserved_for_crc16_do_not_use;
|
||||
|
@ -352,25 +352,24 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
} else { /* echo_id == hf->echo_id */
if (hf->echo_id >= GS_MAX_TX_URBS) {
netdev_err(netdev,
"Unexpected out of range echo id %d\n",
"Unexpected out of range echo id %u\n",
hf->echo_id);
goto resubmit_urb;
}

netdev->stats.tx_packets++;
netdev->stats.tx_bytes += hf->can_dlc;

txc = gs_get_tx_context(dev, hf->echo_id);

/* bad devices send bad echo_ids. */
if (!txc) {
netdev_err(netdev,
"Unexpected unused echo id %d\n",
"Unexpected unused echo id %u\n",
hf->echo_id);
goto resubmit_urb;
}

can_get_echo_skb(netdev, hf->echo_id, NULL);
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += can_get_echo_skb(netdev, hf->echo_id,
NULL);

gs_free_tx_context(txc);
@ -459,7 +458,7 @@ static void gs_usb_xmit_callback(struct urb *urb)
|
||||
struct net_device *netdev = dev->netdev;
|
||||
|
||||
if (urb->status)
|
||||
netdev_info(netdev, "usb xmit fail %d\n", txc->echo_id);
|
||||
netdev_info(netdev, "usb xmit fail %u\n", txc->echo_id);
|
||||
|
||||
usb_free_coherent(urb->dev,
|
||||
urb->transfer_buffer_length,
|
||||
@ -502,7 +501,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
|
||||
idx = txc->echo_id;
|
||||
|
||||
if (idx >= GS_MAX_TX_URBS) {
|
||||
netdev_err(netdev, "Invalid tx context %d\n", idx);
|
||||
netdev_err(netdev, "Invalid tx context %u\n", idx);
|
||||
goto badidx;
|
||||
}
|
||||
|
||||
@ -969,11 +968,11 @@ static int gs_usb_probe(struct usb_interface *intf,
|
||||
}
|
||||
|
||||
icount = dconf->icount + 1;
|
||||
dev_info(&intf->dev, "Configuring for %d interfaces\n", icount);
|
||||
dev_info(&intf->dev, "Configuring for %u interfaces\n", icount);
|
||||
|
||||
if (icount > GS_MAX_INTF) {
|
||||
dev_err(&intf->dev,
|
||||
"Driver cannot handle more that %d CAN interfaces\n",
|
||||
"Driver cannot handle more that %u CAN interfaces\n",
|
||||
GS_MAX_INTF);
|
||||
kfree(dconf);
|
||||
return -EINVAL;
|
||||
|
@ -77,7 +77,6 @@ struct kvaser_usb_dev_card_data {
|
||||
struct kvaser_usb_tx_urb_context {
|
||||
struct kvaser_usb_net_priv *priv;
|
||||
u32 echo_index;
|
||||
int dlc;
|
||||
};
|
||||
|
||||
struct kvaser_usb {
|
||||
@ -162,8 +161,8 @@ struct kvaser_usb_dev_ops {
|
||||
void (*dev_read_bulk_callback)(struct kvaser_usb *dev, void *buf,
|
||||
int len);
|
||||
void *(*dev_frame_to_cmd)(const struct kvaser_usb_net_priv *priv,
|
||||
const struct sk_buff *skb, int *frame_len,
|
||||
int *cmd_len, u16 transid);
|
||||
const struct sk_buff *skb, int *cmd_len,
|
||||
u16 transid);
|
||||
};
|
||||
|
||||
struct kvaser_usb_dev_cfg {
|
||||
|
@ -279,8 +279,6 @@ int kvaser_usb_can_rx_over_error(struct net_device *netdev)
|
||||
cf->can_id |= CAN_ERR_CRTL;
|
||||
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
|
||||
|
||||
stats->rx_packets++;
|
||||
stats->rx_bytes += cf->len;
|
||||
netif_rx(skb);
|
||||
|
||||
return 0;
|
||||
@ -567,7 +565,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
|
||||
goto freeurb;
|
||||
}
|
||||
|
||||
buf = dev->ops->dev_frame_to_cmd(priv, skb, &context->dlc, &cmd_len,
|
||||
buf = dev->ops->dev_frame_to_cmd(priv, skb, &cmd_len,
|
||||
context->echo_index);
|
||||
if (!buf) {
|
||||
stats->tx_dropped++;
|
||||
|
@ -22,6 +22,7 @@
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/units.h>
|
||||
#include <linux/usb.h>
|
||||
|
||||
#include <linux/can.h>
|
||||
@ -295,6 +296,7 @@ struct kvaser_cmd {
|
||||
#define KVASER_USB_HYDRA_CF_FLAG_OVERRUN BIT(1)
|
||||
#define KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME BIT(4)
|
||||
#define KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID BIT(5)
|
||||
#define KVASER_USB_HYDRA_CF_FLAG_TX_ACK BIT(6)
|
||||
/* CAN frame flags. Used in ext_rx_can and ext_tx_can */
|
||||
#define KVASER_USB_HYDRA_CF_FLAG_OSM_NACK BIT(12)
|
||||
#define KVASER_USB_HYDRA_CF_FLAG_ABL BIT(13)
|
||||
@ -869,7 +871,6 @@ static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
|
||||
struct net_device *netdev = priv->netdev;
|
||||
struct can_frame *cf;
|
||||
struct sk_buff *skb;
|
||||
struct net_device_stats *stats;
|
||||
enum can_state new_state, old_state;
|
||||
|
||||
old_state = priv->can.state;
|
||||
@ -919,9 +920,6 @@ static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
|
||||
cf->data[6] = bec->txerr;
|
||||
cf->data[7] = bec->rxerr;
|
||||
|
||||
stats = &netdev->stats;
|
||||
stats->rx_packets++;
|
||||
stats->rx_bytes += cf->len;
|
||||
netif_rx(skb);
|
||||
}
|
||||
|
||||
@ -1074,8 +1072,6 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
|
||||
cf->data[6] = bec.txerr;
|
||||
cf->data[7] = bec.rxerr;
|
||||
|
||||
stats->rx_packets++;
|
||||
stats->rx_bytes += cf->len;
|
||||
netif_rx(skb);
|
||||
|
||||
priv->bec.txerr = bec.txerr;
|
||||
@ -1109,8 +1105,6 @@ static void kvaser_usb_hydra_one_shot_fail(struct kvaser_usb_net_priv *priv,
|
||||
}
|
||||
|
||||
stats->tx_errors++;
|
||||
stats->rx_packets++;
|
||||
stats->rx_bytes += cf->len;
|
||||
netif_rx(skb);
|
||||
}
|
||||
|
||||
@ -1120,7 +1114,9 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
|
||||
struct kvaser_usb_tx_urb_context *context;
|
||||
struct kvaser_usb_net_priv *priv;
|
||||
unsigned long irq_flags;
|
||||
unsigned int len;
|
||||
bool one_shot_fail = false;
|
||||
bool is_err_frame = false;
|
||||
u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd);
|
||||
|
||||
priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
|
||||
@ -1139,24 +1135,28 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
|
||||
kvaser_usb_hydra_one_shot_fail(priv, cmd_ext);
|
||||
one_shot_fail = true;
|
||||
}
|
||||
|
||||
is_err_frame = flags & KVASER_USB_HYDRA_CF_FLAG_TX_ACK &&
|
||||
flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME;
|
||||
}
|
||||
|
||||
context = &priv->tx_contexts[transid % dev->max_tx_urbs];
|
||||
if (!one_shot_fail) {
|
||||
struct net_device_stats *stats = &priv->netdev->stats;
|
||||
|
||||
stats->tx_packets++;
|
||||
stats->tx_bytes += can_fd_dlc2len(context->dlc);
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&priv->tx_contexts_lock, irq_flags);
|
||||
|
||||
can_get_echo_skb(priv->netdev, context->echo_index, NULL);
|
||||
len = can_get_echo_skb(priv->netdev, context->echo_index, NULL);
|
||||
context->echo_index = dev->max_tx_urbs;
|
||||
--priv->active_tx_contexts;
|
||||
netif_wake_queue(priv->netdev);
|
||||
|
||||
spin_unlock_irqrestore(&priv->tx_contexts_lock, irq_flags);
|
||||
|
||||
if (!one_shot_fail && !is_err_frame) {
|
||||
struct net_device_stats *stats = &priv->netdev->stats;
|
||||
|
||||
stats->tx_packets++;
|
||||
stats->tx_bytes += len;
|
||||
}
|
||||
}
|
||||
|
||||
static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev,
|
||||
@ -1208,13 +1208,15 @@ static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev,
|
||||
|
||||
cf->len = can_cc_dlc2len(cmd->rx_can.dlc);
|
||||
|
||||
if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME)
|
||||
if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) {
|
||||
cf->can_id |= CAN_RTR_FLAG;
|
||||
else
|
||||
} else {
|
||||
memcpy(cf->data, cmd->rx_can.data, cf->len);
|
||||
|
||||
stats->rx_bytes += cf->len;
|
||||
}
|
||||
stats->rx_packets++;
|
||||
stats->rx_bytes += cf->len;
|
||||
|
||||
netif_rx(skb);
|
||||
}
|
||||
|
||||
@ -1286,13 +1288,15 @@ static void kvaser_usb_hydra_rx_msg_ext(const struct kvaser_usb *dev,
|
||||
cf->len = can_cc_dlc2len(dlc);
|
||||
}
|
||||
|
||||
if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME)
|
||||
if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) {
|
||||
cf->can_id |= CAN_RTR_FLAG;
|
||||
else
|
||||
} else {
|
||||
memcpy(cf->data, cmd->rx_can.kcan_payload, cf->len);
|
||||
|
||||
stats->rx_bytes += cf->len;
|
||||
}
|
||||
stats->rx_packets++;
|
||||
stats->rx_bytes += cf->len;
|
||||
|
||||
netif_rx(skb);
|
||||
}
|
||||
|
||||
@ -1371,8 +1375,8 @@ static void kvaser_usb_hydra_handle_cmd(const struct kvaser_usb *dev,
|
||||
|
||||
static void *
|
||||
kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv,
|
||||
const struct sk_buff *skb, int *frame_len,
|
||||
int *cmd_len, u16 transid)
|
||||
const struct sk_buff *skb, int *cmd_len,
|
||||
u16 transid)
|
||||
{
|
||||
struct kvaser_usb *dev = priv->dev;
|
||||
struct kvaser_cmd_ext *cmd;
|
||||
@ -1384,8 +1388,6 @@ kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv,
|
||||
u32 kcan_id;
|
||||
u32 kcan_header;
|
||||
|
||||
*frame_len = nbr_of_bytes;
|
||||
|
||||
cmd = kcalloc(1, sizeof(struct kvaser_cmd_ext), GFP_ATOMIC);
|
||||
if (!cmd)
|
||||
return NULL;
|
||||
@ -1451,8 +1453,8 @@ kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv,
|
||||
|
||||
static void *
|
||||
kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv,
|
||||
const struct sk_buff *skb, int *frame_len,
|
||||
int *cmd_len, u16 transid)
|
||||
const struct sk_buff *skb, int *cmd_len,
|
||||
u16 transid)
|
||||
{
|
||||
struct kvaser_usb *dev = priv->dev;
|
||||
struct kvaser_cmd *cmd;
|
||||
@ -1460,8 +1462,6 @@ kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv,
|
||||
u32 flags;
|
||||
u32 id;
|
||||
|
||||
*frame_len = cf->len;
|
||||
|
||||
cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
|
||||
if (!cmd)
|
||||
return NULL;
|
||||
@ -1495,7 +1495,7 @@ kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv,
|
||||
cmd->tx_can.id = cpu_to_le32(id);
|
||||
cmd->tx_can.flags = flags;
|
||||
|
||||
memcpy(cmd->tx_can.data, cf->data, *frame_len);
|
||||
memcpy(cmd->tx_can.data, cf->data, cf->len);
|
||||
|
||||
return cmd;
|
||||
}
|
||||
@ -2003,17 +2003,17 @@ static void kvaser_usb_hydra_read_bulk_callback(struct kvaser_usb *dev,
|
||||
|
||||
static void *
|
||||
kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
|
||||
const struct sk_buff *skb, int *frame_len,
|
||||
int *cmd_len, u16 transid)
|
||||
const struct sk_buff *skb, int *cmd_len,
|
||||
u16 transid)
|
||||
{
|
||||
void *buf;
|
||||
|
||||
if (priv->dev->card_data.capabilities & KVASER_USB_HYDRA_CAP_EXT_CMD)
|
||||
buf = kvaser_usb_hydra_frame_to_cmd_ext(priv, skb, frame_len,
|
||||
cmd_len, transid);
|
||||
buf = kvaser_usb_hydra_frame_to_cmd_ext(priv, skb, cmd_len,
|
||||
transid);
|
||||
else
|
||||
buf = kvaser_usb_hydra_frame_to_cmd_std(priv, skb, frame_len,
|
||||
cmd_len, transid);
|
||||
buf = kvaser_usb_hydra_frame_to_cmd_std(priv, skb, cmd_len,
|
||||
transid);
|
||||
|
||||
return buf;
|
||||
}
|
||||
@ -2040,7 +2040,7 @@ const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = {
|
||||
|
||||
static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan = {
|
||||
.clock = {
|
||||
.freq = 80000000,
|
||||
.freq = 80 * MEGA /* Hz */,
|
||||
},
|
||||
.timestamp_freq = 80,
|
||||
.bittiming_const = &kvaser_usb_hydra_kcan_bittiming_c,
|
||||
@ -2049,7 +2049,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan = {
|
||||
|
||||
static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = {
|
||||
.clock = {
|
||||
.freq = 24000000,
|
||||
.freq = 24 * MEGA /* Hz */,
|
||||
},
|
||||
.timestamp_freq = 1,
|
||||
.bittiming_const = &kvaser_usb_hydra_flexc_bittiming_c,
|
||||
@ -2057,7 +2057,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = {
|
||||
|
||||
static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt = {
|
||||
.clock = {
|
||||
.freq = 80000000,
|
||||
.freq = 80 * MEGA /* Hz */,
|
||||
},
|
||||
.timestamp_freq = 24,
|
||||
.bittiming_const = &kvaser_usb_hydra_rt_bittiming_c,
|
||||
|
@ -19,6 +19,7 @@
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/units.h>
|
||||
#include <linux/usb.h>
|
||||
|
||||
#include <linux/can.h>
|
||||
@ -356,7 +357,7 @@ static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
|
||||
|
||||
static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = {
|
||||
.clock = {
|
||||
.freq = 8000000,
|
||||
.freq = 8 * MEGA /* Hz */,
|
||||
},
|
||||
.timestamp_freq = 1,
|
||||
.bittiming_const = &kvaser_usb_leaf_bittiming_const,
|
||||
@ -364,7 +365,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = {
|
||||
|
||||
static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = {
|
||||
.clock = {
|
||||
.freq = 16000000,
|
||||
.freq = 16 * MEGA /* Hz */,
|
||||
},
|
||||
.timestamp_freq = 1,
|
||||
.bittiming_const = &kvaser_usb_leaf_bittiming_const,
|
||||
@ -372,7 +373,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = {
|
||||
|
||||
static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = {
|
||||
.clock = {
|
||||
.freq = 24000000,
|
||||
.freq = 24 * MEGA /* Hz */,
|
||||
},
|
||||
.timestamp_freq = 1,
|
||||
.bittiming_const = &kvaser_usb_leaf_bittiming_const,
|
||||
@ -380,7 +381,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = {
|
||||
|
||||
static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = {
|
||||
.clock = {
|
||||
.freq = 32000000,
|
||||
.freq = 32 * MEGA /* Hz */,
|
||||
},
|
||||
.timestamp_freq = 1,
|
||||
.bittiming_const = &kvaser_usb_leaf_bittiming_const,
|
||||
@ -388,16 +389,14 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = {
|
||||
|
||||
static void *
|
||||
kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
|
||||
const struct sk_buff *skb, int *frame_len,
|
||||
int *cmd_len, u16 transid)
|
||||
const struct sk_buff *skb, int *cmd_len,
|
||||
u16 transid)
|
||||
{
|
||||
struct kvaser_usb *dev = priv->dev;
|
||||
struct kvaser_cmd *cmd;
|
||||
u8 *cmd_tx_can_flags = NULL; /* GCC */
|
||||
struct can_frame *cf = (struct can_frame *)skb->data;
|
||||
|
||||
*frame_len = cf->len;
|
||||
|
||||
cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
|
||||
if (cmd) {
|
||||
cmd->u.tx_can.tid = transid & 0xff;
|
||||
@ -641,8 +640,6 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
|
||||
if (skb) {
|
||||
cf->can_id |= CAN_ERR_RESTARTED;
|
||||
|
||||
stats->rx_packets++;
|
||||
stats->rx_bytes += cf->len;
|
||||
netif_rx(skb);
|
||||
} else {
|
||||
netdev_err(priv->netdev,
|
||||
@ -655,12 +652,11 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
|
||||
priv->can.state = CAN_STATE_ERROR_ACTIVE;
|
||||
}
|
||||
|
||||
stats->tx_packets++;
|
||||
stats->tx_bytes += context->dlc;
|
||||
|
||||
spin_lock_irqsave(&priv->tx_contexts_lock, flags);
|
||||
|
||||
can_get_echo_skb(priv->netdev, context->echo_index, NULL);
|
||||
stats->tx_packets++;
|
||||
stats->tx_bytes += can_get_echo_skb(priv->netdev,
|
||||
context->echo_index, NULL);
|
||||
context->echo_index = dev->max_tx_urbs;
|
||||
--priv->active_tx_contexts;
|
||||
netif_wake_queue(priv->netdev);
|
||||
@ -843,8 +839,6 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
|
||||
cf->data[6] = es->txerr;
|
||||
cf->data[7] = es->rxerr;
|
||||
|
||||
stats->rx_packets++;
|
||||
stats->rx_bytes += cf->len;
|
||||
netif_rx(skb);
|
||||
}
|
||||
|
||||
@ -1071,7 +1065,8 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
}

stats->rx_packets++;
stats->rx_bytes += cf->len;
if (!(cf->can_id & CAN_RTR_FLAG))
stats->rx_bytes += cf->len;
netif_rx(skb);
}
@ -64,7 +64,6 @@
struct mcba_usb_ctx {
struct mcba_priv *priv;
u32 ndx;
u8 dlc;
bool can;
};

@ -184,13 +183,10 @@ static inline struct mcba_usb_ctx *mcba_usb_get_free_ctx(struct mcba_priv *priv,
ctx = &priv->tx_context[i];
ctx->ndx = i;

if (cf) {
if (cf)
ctx->can = true;
ctx->dlc = cf->len;
} else {
else
ctx->can = false;
ctx->dlc = 0;
}

atomic_dec(&priv->free_ctx_cnt);
break;
@ -236,10 +232,10 @@ static void mcba_usb_write_bulk_callback(struct urb *urb)
return;

netdev->stats.tx_packets++;
netdev->stats.tx_bytes += ctx->dlc;
netdev->stats.tx_bytes += can_get_echo_skb(netdev, ctx->ndx,
NULL);

can_led_event(netdev, CAN_LED_EVENT_TX);
can_get_echo_skb(netdev, ctx->ndx, NULL);
}

if (urb->status)
@ -450,15 +446,16 @@ static void mcba_usb_process_can(struct mcba_priv *priv,
cf->can_id = (sid & 0xffe0) >> 5;
}

if (msg->dlc & MCBA_DLC_RTR_MASK)
cf->can_id |= CAN_RTR_FLAG;

cf->len = can_cc_dlc2len(msg->dlc & MCBA_DLC_MASK);

memcpy(cf->data, msg->data, cf->len);
if (msg->dlc & MCBA_DLC_RTR_MASK) {
cf->can_id |= CAN_RTR_FLAG;
} else {
memcpy(cf->data, msg->data, cf->len);

stats->rx_bytes += cf->len;
}
stats->rx_packets++;
stats->rx_bytes += cf->len;

can_led_event(priv->netdev, CAN_LED_EVENT_RX);
netif_rx(skb);

@ -8,6 +8,7 @@
*
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
#include <asm/unaligned.h>
#include <linux/netdevice.h>
#include <linux/usb.h>
#include <linux/module.h>
@ -520,8 +521,6 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
&hwts->hwtstamp);
}

mc->netdev->stats.rx_packets++;
mc->netdev->stats.rx_bytes += cf->len;
netif_rx(skb);

return 0;
@ -678,15 +677,16 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
/* Ignore next byte (client private id) if SRR bit is set */
if (can_id_flags & PCAN_USB_TX_SRR)
mc->ptr++;

/* update statistics */
mc->netdev->stats.rx_bytes += cf->len;
}
mc->netdev->stats.rx_packets++;

/* convert timestamp into kernel time */
hwts = skb_hwtstamps(skb);
peak_usb_get_ts_time(&mc->pdev->time_ref, mc->ts16, &hwts->hwtstamp);

/* update statistics */
mc->netdev->stats.rx_packets++;
mc->netdev->stats.rx_bytes += cf->len;
/* push the skb */
netif_rx(skb);

@ -883,6 +883,11 @@ static int pcan_usb_init(struct peak_usb_device *dev)
return err;
}

dev_info(dev->netdev->dev.parent,
"PEAK-System %s adapter hwrev %u serial %08X (%u channel)\n",
pcan_usb.name, dev->device_rev, serial_number,
pcan_usb.ctrl_count);

/* Since rev 4.1, PCAN-USB is able to make single-shot as well as
* looped back frames.
*/
@ -896,11 +901,6 @@ static int pcan_usb_init(struct peak_usb_device *dev)
"Firmware update available. Please contact support@peak-system.com\n");
}

dev_info(dev->netdev->dev.parent,
"PEAK-System %s adapter hwrev %u serial %08X (%u channel)\n",
pcan_usb.name, dev->device_rev, serial_number,
pcan_usb.ctrl_count);

return 0;
}

@ -205,6 +205,19 @@ int peak_usb_netif_rx(struct sk_buff *skb,
return netif_rx(skb);
}

/* post received skb with native 64-bit hw timestamp */
int peak_usb_netif_rx_64(struct sk_buff *skb, u32 ts_low, u32 ts_high)
{
struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
u64 ns_ts;

ns_ts = (u64)ts_high << 32 | ts_low;
ns_ts *= NSEC_PER_USEC;
hwts->hwtstamp = ns_to_ktime(ns_ts);

return netif_rx(skb);
}

/*
* callback for bulk Rx urb
*/
@ -278,6 +291,7 @@ static void peak_usb_write_bulk_callback(struct urb *urb)
struct peak_tx_urb_context *context = urb->context;
struct peak_usb_device *dev;
struct net_device *netdev;
int tx_bytes;

BUG_ON(!context);

@ -292,10 +306,6 @@ static void peak_usb_write_bulk_callback(struct urb *urb)
/* check tx status */
switch (urb->status) {
case 0:
/* transmission complete */
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += context->data_len;

/* prevent tx timeout */
netif_trans_update(netdev);
break;
@ -314,12 +324,17 @@ static void peak_usb_write_bulk_callback(struct urb *urb)
}

/* should always release echo skb and corresponding context */
can_get_echo_skb(netdev, context->echo_index, NULL);
tx_bytes = can_get_echo_skb(netdev, context->echo_index, NULL);
context->echo_index = PCAN_USB_MAX_TX_URBS;

/* do wakeup tx queue in case of success only */
if (!urb->status)
if (!urb->status) {
/* transmission complete */
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += tx_bytes;

/* do wakeup tx queue in case of success only */
netif_wake_queue(netdev);
}
}

/*
@ -331,7 +346,6 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
struct peak_usb_device *dev = netdev_priv(netdev);
struct peak_tx_urb_context *context = NULL;
struct net_device_stats *stats = &netdev->stats;
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct urb *urb;
u8 *obuf;
int i, err;
@ -365,9 +379,6 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,

context->echo_index = i;

/* Note: this works with CANFD frames too */
context->data_len = cfd->len;

usb_anchor_urb(urb, &dev->tx_submitted);

can_put_echo_skb(skb, netdev, context->echo_index, 0);

@ -99,7 +99,6 @@ struct peak_time_ref {
struct peak_tx_urb_context {
struct peak_usb_device *dev;
u32 echo_index;
u8 data_len;
struct urb *urb;
};

@ -143,6 +142,7 @@ void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now);
void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *tv);
int peak_usb_netif_rx(struct sk_buff *skb,
struct peak_time_ref *time_ref, u32 ts_low);
int peak_usb_netif_rx_64(struct sk_buff *skb, u32 ts_low, u32 ts_high);
void peak_usb_async_complete(struct urb *urb);
void peak_usb_restart_complete(struct peak_usb_device *dev);

@ -507,15 +507,16 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
if (rx_msg_flags & PUCAN_MSG_EXT_ID)
cfd->can_id |= CAN_EFF_FLAG;

if (rx_msg_flags & PUCAN_MSG_RTR)
if (rx_msg_flags & PUCAN_MSG_RTR) {
cfd->can_id |= CAN_RTR_FLAG;
else
} else {
memcpy(cfd->data, rm->d, cfd->len);

netdev->stats.rx_bytes += cfd->len;
}
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += cfd->len;

peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
peak_usb_netif_rx_64(skb, le32_to_cpu(rm->ts_low),
le32_to_cpu(rm->ts_high));

return 0;
}
@ -576,10 +577,8 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
if (!skb)
return -ENOMEM;

netdev->stats.rx_packets++;
netdev->stats.rx_bytes += cf->len;

peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
peak_usb_netif_rx_64(skb, le32_to_cpu(sm->ts_low),
le32_to_cpu(sm->ts_high));

return 0;
}
@ -629,7 +628,8 @@ static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if,
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;

peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(ov->ts_low));
peak_usb_netif_rx_64(skb, le32_to_cpu(ov->ts_low),
le32_to_cpu(ov->ts_high));

netdev->stats.rx_over_errors++;
netdev->stats.rx_errors++;

@ -536,17 +536,19 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
if (rx->flags & PCAN_USBPRO_EXT)
can_frame->can_id |= CAN_EFF_FLAG;

if (rx->flags & PCAN_USBPRO_RTR)
if (rx->flags & PCAN_USBPRO_RTR) {
can_frame->can_id |= CAN_RTR_FLAG;
else
} else {
memcpy(can_frame->data, rx->data, can_frame->len);

netdev->stats.rx_bytes += can_frame->len;
}
netdev->stats.rx_packets++;

hwts = skb_hwtstamps(skb);
peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(rx->ts32),
&hwts->hwtstamp);

netdev->stats.rx_packets++;
netdev->stats.rx_bytes += can_frame->len;
netif_rx(skb);

return 0;
@ -660,8 +662,6 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,

hwts = skb_hwtstamps(skb);
peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(er->ts32), &hwts->hwtstamp);
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += can_frame->len;
netif_rx(skb);

return 0;

@ -259,7 +259,6 @@ struct ucan_priv;
/* Context Information for transmission URBs */
struct ucan_urb_context {
struct ucan_priv *up;
u8 dlc;
bool allocated;
};

@ -621,8 +620,11 @@ static void ucan_rx_can_msg(struct ucan_priv *up, struct ucan_message_in *m)
memcpy(cf->data, m->msg.can_msg.data, cf->len);

/* don't count error frames as real packets */
stats->rx_packets++;
stats->rx_bytes += cf->len;
if (!(cf->can_id & CAN_ERR_FLAG)) {
stats->rx_packets++;
if (!(cf->can_id & CAN_RTR_FLAG))
stats->rx_bytes += cf->len;
}

/* pass it to Linux */
netif_rx(skb);
@ -634,7 +636,7 @@ static void ucan_tx_complete_msg(struct ucan_priv *up,
{
unsigned long flags;
u16 count, i;
u8 echo_index, dlc;
u8 echo_index;
u16 len = le16_to_cpu(m->len);

struct ucan_urb_context *context;
@ -658,7 +660,6 @@ static void ucan_tx_complete_msg(struct ucan_priv *up,

/* gather information from the context */
context = &up->context_array[echo_index];
dlc = READ_ONCE(context->dlc);

/* Release context and restart queue if necessary.
* Also check if the context was allocated
@ -671,8 +672,8 @@ static void ucan_tx_complete_msg(struct ucan_priv *up,
UCAN_TX_COMPLETE_SUCCESS) {
/* update statistics */
up->netdev->stats.tx_packets++;
up->netdev->stats.tx_bytes += dlc;
can_get_echo_skb(up->netdev, echo_index, NULL);
up->netdev->stats.tx_bytes +=
can_get_echo_skb(up->netdev, echo_index, NULL);
} else {
up->netdev->stats.tx_dropped++;
can_free_echo_skb(up->netdev, echo_index, NULL);
@ -1086,8 +1087,6 @@ static struct urb *ucan_prepare_tx_urb(struct ucan_priv *up,
}
m->len = cpu_to_le16(mlen);

context->dlc = cf->len;

m->subtype = echo_index;

/* build the urb */

@ -114,15 +114,12 @@ struct usb_8dev_tx_urb_context {
struct usb_8dev_priv *priv;

u32 echo_index;
u8 dlc;
};

/* Structure to hold all of our device specific stuff */
struct usb_8dev_priv {
struct can_priv can; /* must be the first member */

struct sk_buff *echo_skb[MAX_TX_URBS];

struct usb_device *udev;
struct net_device *netdev;

@ -449,8 +446,6 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
priv->bec.txerr = txerr;
priv->bec.rxerr = rxerr;

stats->rx_packets++;
stats->rx_bytes += cf->len;
netif_rx(skb);
}

@ -476,13 +471,14 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv,
if (msg->flags & USB_8DEV_EXTID)
cf->can_id |= CAN_EFF_FLAG;

if (msg->flags & USB_8DEV_RTR)
if (msg->flags & USB_8DEV_RTR) {
cf->can_id |= CAN_RTR_FLAG;
else
} else {
memcpy(cf->data, msg->data, cf->len);

stats->rx_bytes += cf->len;
}
stats->rx_packets++;
stats->rx_bytes += cf->len;

netif_rx(skb);

can_led_event(priv->netdev, CAN_LED_EVENT_RX);
@ -584,9 +580,7 @@ static void usb_8dev_write_bulk_callback(struct urb *urb)
urb->status);

netdev->stats.tx_packets++;
netdev->stats.tx_bytes += context->dlc;

can_get_echo_skb(netdev, context->echo_index, NULL);
netdev->stats.tx_bytes += can_get_echo_skb(netdev, context->echo_index, NULL);

can_led_event(netdev, CAN_LED_EVENT_TX);

@ -657,7 +651,6 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,

context->priv = priv;
context->echo_index = i;
context->dlc = cf->len;

usb_fill_bulk_urb(urb, priv->udev,
usb_sndbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_TX),

@ -87,13 +87,14 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
{
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
int loop;
int loop, len;

if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;

len = cfd->can_id & CAN_RTR_FLAG ? 0 : cfd->len;
stats->tx_packets++;
stats->tx_bytes += cfd->len;
stats->tx_bytes += len;

/* set flag whether this packet has to be looped back */
loop = skb->pkt_type == PACKET_LOOPBACK;
@ -105,7 +106,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
* CAN core already did the echo for us
*/
stats->rx_packets++;
stats->rx_bytes += cfd->len;
stats->rx_bytes += len;
}
consume_skb(skb);
return NETDEV_TX_OK;

@ -62,7 +62,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
skb->dev = peer;
skb->ip_summed = CHECKSUM_UNNECESSARY;

len = cfd->len;
len = cfd->can_id & CAN_RTR_FLAG ? 0 : cfd->len;
if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
srcstats->tx_packets++;
srcstats->tx_bytes += len;

@ -516,8 +516,7 @@ static int xcan_chip_start(struct net_device *ndev)
* @ndev: Pointer to net_device structure
* @mode: Tells the mode of the driver
*
* This check the drivers state and calls the
* the corresponding modes to set.
* This check the drivers state and calls the corresponding modes to set.
*
* Return: 0 on success and failure value on error
*/
@ -788,10 +787,11 @@ static int xcan_rx(struct net_device *ndev, int frame_base)
*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
if (cf->len > 4)
*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
}

stats->rx_bytes += cf->len;
stats->rx_bytes += cf->len;
}
stats->rx_packets++;

netif_receive_skb(skb);

return 1;
@ -872,8 +872,11 @@ static int xcanfd_rx(struct net_device *ndev, int frame_base)
*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
}
}
stats->rx_bytes += cf->len;

if (!(cf->can_id & CAN_RTR_FLAG))
stats->rx_bytes += cf->len;
stats->rx_packets++;

netif_receive_skb(skb);

return 1;
@ -966,13 +969,8 @@ static void xcan_update_error_state_after_rxtx(struct net_device *ndev)

xcan_set_error_state(ndev, new_state, skb ? cf : NULL);

if (skb) {
struct net_device_stats *stats = &ndev->stats;

stats->rx_packets++;
stats->rx_bytes += cf->len;
if (skb)
netif_rx(skb);
}
}
}

@ -982,7 +980,7 @@ static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
* @isr: interrupt status register value
*
* This is the CAN error interrupt and it will
* check the the type of error and forward the error
* check the type of error and forward the error
* frame to upper layers.
*/
static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
@ -1096,8 +1094,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
if (skb) {
skb_cf->can_id |= cf.can_id;
memcpy(skb_cf->data, cf.data, CAN_ERR_DLC);
stats->rx_packets++;
stats->rx_bytes += CAN_ERR_DLC;
netif_rx(skb);
}
}
@ -1849,11 +1845,9 @@ static int xcan_probe(struct platform_device *pdev)
static int xcan_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct xcan_priv *priv = netdev_priv(ndev);

unregister_candev(ndev);
pm_runtime_disable(&pdev->dev);
netif_napi_del(&priv->napi);
free_candev(ndev);

return 0;

@ -1222,7 +1222,7 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
|
||||
return;
|
||||
|
||||
/* Enable flow control on BCM5301x's CPU port */
|
||||
if (is5301x(dev) && port == dev->cpu_port)
|
||||
if (is5301x(dev) && dsa_is_cpu_port(ds, port))
|
||||
tx_pause = rx_pause = true;
|
||||
|
||||
if (phydev->pause) {
|
||||
@ -1291,12 +1291,6 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
|
||||
return;
|
||||
}
|
||||
}
|
||||
} else if (is5301x(dev)) {
|
||||
if (port != dev->cpu_port) {
|
||||
b53_force_port_config(dev, dev->cpu_port, 2000,
|
||||
DUPLEX_FULL, true, true);
|
||||
b53_force_link(dev, dev->cpu_port, 1);
|
||||
}
|
||||
}
|
||||
|
||||
/* Re-negotiate EEE if it was enabled already */
|
||||
@ -1349,10 +1343,8 @@ void b53_phylink_validate(struct dsa_switch *ds, int port,
|
||||
phylink_set(mask, 100baseT_Full);
|
||||
}
|
||||
|
||||
bitmap_and(supported, supported, mask,
|
||||
__ETHTOOL_LINK_MODE_MASK_NBITS);
|
||||
bitmap_and(state->advertising, state->advertising, mask,
|
||||
__ETHTOOL_LINK_MODE_MASK_NBITS);
|
||||
linkmode_and(supported, supported, mask);
|
||||
linkmode_and(state->advertising, state->advertising, mask);
|
||||
|
||||
phylink_helper_basex_speed(state);
|
||||
}
|
||||
@ -1550,7 +1542,7 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
|
||||
}
|
||||
EXPORT_SYMBOL(b53_vlan_del);
|
||||
|
||||
/* Address Resolution Logic routines */
|
||||
/* Address Resolution Logic routines. Caller must hold &dev->arl_mutex. */
|
||||
static int b53_arl_op_wait(struct b53_device *dev)
|
||||
{
|
||||
unsigned int timeout = 10;
|
||||
@ -1715,6 +1707,7 @@ int b53_fdb_add(struct dsa_switch *ds, int port,
|
||||
const unsigned char *addr, u16 vid)
|
||||
{
|
||||
struct b53_device *priv = ds->priv;
|
||||
int ret;
|
||||
|
||||
/* 5325 and 5365 require some more massaging, but could
|
||||
* be supported eventually
|
||||
@ -1722,7 +1715,11 @@ int b53_fdb_add(struct dsa_switch *ds, int port,
|
||||
if (is5325(priv) || is5365(priv))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
return b53_arl_op(priv, 0, port, addr, vid, true);
|
||||
mutex_lock(&priv->arl_mutex);
|
||||
ret = b53_arl_op(priv, 0, port, addr, vid, true);
|
||||
mutex_unlock(&priv->arl_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(b53_fdb_add);
|
||||
|
||||
@ -1730,8 +1727,13 @@ int b53_fdb_del(struct dsa_switch *ds, int port,
|
||||
const unsigned char *addr, u16 vid)
|
||||
{
|
||||
struct b53_device *priv = ds->priv;
|
||||
int ret;
|
||||
|
||||
return b53_arl_op(priv, 0, port, addr, vid, false);
|
||||
mutex_lock(&priv->arl_mutex);
|
||||
ret = b53_arl_op(priv, 0, port, addr, vid, false);
|
||||
mutex_unlock(&priv->arl_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(b53_fdb_del);
|
||||
|
||||
@ -1788,6 +1790,8 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
|
||||
int ret;
|
||||
u8 reg;
|
||||
|
||||
mutex_lock(&priv->arl_mutex);
|
||||
|
||||
/* Start search operation */
|
||||
reg = ARL_SRCH_STDN;
|
||||
b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg);
|
||||
@ -1795,18 +1799,18 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
|
||||
do {
|
||||
ret = b53_arl_search_wait(priv);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
|
||||
b53_arl_search_rd(priv, 0, &results[0]);
|
||||
ret = b53_fdb_copy(port, &results[0], cb, data);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
|
||||
if (priv->num_arl_bins > 2) {
|
||||
b53_arl_search_rd(priv, 1, &results[1]);
|
||||
ret = b53_fdb_copy(port, &results[1], cb, data);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
|
||||
if (!results[0].is_valid && !results[1].is_valid)
|
||||
break;
|
||||
@ -1814,6 +1818,8 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
|
||||
|
||||
} while (count++ < b53_max_arl_entries(priv) / 2);
|
||||
|
||||
mutex_unlock(&priv->arl_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(b53_fdb_dump);
|
||||
@ -1822,6 +1828,7 @@ int b53_mdb_add(struct dsa_switch *ds, int port,
|
||||
const struct switchdev_obj_port_mdb *mdb)
|
||||
{
|
||||
struct b53_device *priv = ds->priv;
|
||||
int ret;
|
||||
|
||||
/* 5325 and 5365 require some more massaging, but could
|
||||
* be supported eventually
|
||||
@ -1829,7 +1836,11 @@ int b53_mdb_add(struct dsa_switch *ds, int port,
|
||||
if (is5325(priv) || is5365(priv))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
return b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
|
||||
mutex_lock(&priv->arl_mutex);
|
||||
ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
|
||||
mutex_unlock(&priv->arl_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(b53_mdb_add);
|
||||
|
||||
@ -1839,7 +1850,9 @@ int b53_mdb_del(struct dsa_switch *ds, int port,
|
||||
struct b53_device *priv = ds->priv;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&priv->arl_mutex);
|
||||
ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false);
|
||||
mutex_unlock(&priv->arl_mutex);
|
||||
if (ret)
|
||||
dev_err(ds->dev, "failed to delete MDB entry\n");
|
||||
|
||||
@ -1847,7 +1860,8 @@ int b53_mdb_del(struct dsa_switch *ds, int port,
|
||||
}
|
||||
EXPORT_SYMBOL(b53_mdb_del);
|
||||
|
||||
int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
|
||||
int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
|
||||
bool *tx_fwd_offload)
|
||||
{
|
||||
struct b53_device *dev = ds->priv;
|
||||
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
|
||||
@ -1874,7 +1888,7 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
|
||||
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
|
||||
|
||||
b53_for_each_port(dev, i) {
|
||||
if (dsa_to_port(ds, i)->bridge_dev != br)
|
||||
if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
|
||||
continue;
|
||||
|
||||
/* Add this local port to the remote port VLAN control
|
||||
@ -1898,7 +1912,7 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
|
||||
}
|
||||
EXPORT_SYMBOL(b53_br_join);
|
||||
|
||||
void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
|
||||
void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
|
||||
{
|
||||
struct b53_device *dev = ds->priv;
|
||||
struct b53_vlan *vl = &dev->vlans[0];
|
||||
@ -1910,7 +1924,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
|
||||
|
||||
b53_for_each_port(dev, i) {
|
||||
/* Don't touch the remaining ports */
|
||||
if (dsa_to_port(ds, i)->bridge_dev != br)
|
||||
if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
|
||||
continue;
|
||||
|
||||
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
|
||||
@ -2302,33 +2316,30 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
||||
.chip_id = BCM5325_DEVICE_ID,
|
||||
.dev_name = "BCM5325",
|
||||
.vlans = 16,
|
||||
.enabled_ports = 0x1f,
|
||||
.enabled_ports = 0x3f,
|
||||
.arl_bins = 2,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 5,
|
||||
.cpu_port = B53_CPU_PORT_25,
|
||||
.duplex_reg = B53_DUPLEX_STAT_FE,
|
||||
},
|
||||
{
|
||||
.chip_id = BCM5365_DEVICE_ID,
|
||||
.dev_name = "BCM5365",
|
||||
.vlans = 256,
|
||||
.enabled_ports = 0x1f,
|
||||
.enabled_ports = 0x3f,
|
||||
.arl_bins = 2,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 5,
|
||||
.cpu_port = B53_CPU_PORT_25,
|
||||
.duplex_reg = B53_DUPLEX_STAT_FE,
|
||||
},
|
||||
{
|
||||
.chip_id = BCM5389_DEVICE_ID,
|
||||
.dev_name = "BCM5389",
|
||||
.vlans = 4096,
|
||||
.enabled_ports = 0x1f,
|
||||
.enabled_ports = 0x11f,
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT,
|
||||
.vta_regs = B53_VTA_REGS,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
|
||||
@ -2338,11 +2349,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
||||
.chip_id = BCM5395_DEVICE_ID,
|
||||
.dev_name = "BCM5395",
|
||||
.vlans = 4096,
|
||||
.enabled_ports = 0x1f,
|
||||
.enabled_ports = 0x11f,
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT,
|
||||
.vta_regs = B53_VTA_REGS,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
|
||||
@ -2352,11 +2362,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
||||
.chip_id = BCM5397_DEVICE_ID,
|
||||
.dev_name = "BCM5397",
|
||||
.vlans = 4096,
|
||||
.enabled_ports = 0x1f,
|
||||
.enabled_ports = 0x11f,
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT,
|
||||
.vta_regs = B53_VTA_REGS_9798,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
|
||||
@ -2366,11 +2375,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
||||
.chip_id = BCM5398_DEVICE_ID,
|
||||
.dev_name = "BCM5398",
|
||||
.vlans = 4096,
|
||||
.enabled_ports = 0x7f,
|
||||
.enabled_ports = 0x17f,
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT,
|
||||
.vta_regs = B53_VTA_REGS_9798,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
|
||||
@ -2380,12 +2388,11 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
||||
.chip_id = BCM53115_DEVICE_ID,
|
||||
.dev_name = "BCM53115",
|
||||
.vlans = 4096,
|
||||
.enabled_ports = 0x1f,
|
||||
.enabled_ports = 0x11f,
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.vta_regs = B53_VTA_REGS,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
|
||||
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
|
||||
@ -2394,11 +2401,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
||||
.chip_id = BCM53125_DEVICE_ID,
|
||||
.dev_name = "BCM53125",
|
||||
.vlans = 4096,
|
||||
.enabled_ports = 0xff,
|
||||
.enabled_ports = 0x1ff,
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT,
|
||||
.vta_regs = B53_VTA_REGS,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
|
||||
@ -2412,7 +2418,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT,
|
||||
.vta_regs = B53_VTA_REGS,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
|
||||
@ -2426,7 +2431,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT,
|
||||
.vta_regs = B53_VTA_REGS_63XX,
|
||||
.duplex_reg = B53_DUPLEX_STAT_63XX,
|
||||
.jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
|
||||
@ -2436,11 +2440,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
||||
.chip_id = BCM53010_DEVICE_ID,
|
||||
.dev_name = "BCM53010",
|
||||
.vlans = 4096,
|
||||
.enabled_ports = 0x1f,
|
||||
.enabled_ports = 0x1bf,
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
|
||||
.vta_regs = B53_VTA_REGS,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
|
||||
@ -2454,7 +2457,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
|
||||
.vta_regs = B53_VTA_REGS,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
|
||||
@ -2468,7 +2470,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
|
||||
.vta_regs = B53_VTA_REGS,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
|
||||
@ -2478,11 +2479,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
||||
.chip_id = BCM53018_DEVICE_ID,
|
||||
.dev_name = "BCM53018",
|
||||
.vlans = 4096,
|
||||
.enabled_ports = 0x1f,
|
||||
.enabled_ports = 0x1bf,
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
|
||||
.vta_regs = B53_VTA_REGS,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
|
||||
@ -2492,11 +2492,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
||||
.chip_id = BCM53019_DEVICE_ID,
|
||||
.dev_name = "BCM53019",
|
||||
.vlans = 4096,
|
||||
.enabled_ports = 0x1f,
|
||||
.enabled_ports = 0x1bf,
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
|
||||
.vta_regs = B53_VTA_REGS,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
|
||||
@ -2510,7 +2509,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT,
|
||||
.vta_regs = B53_VTA_REGS,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
|
||||
@ -2524,7 +2522,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT,
|
||||
.vta_regs = B53_VTA_REGS,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
|
||||
@ -2539,7 +2536,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 256,
|
||||
.imp_port = 8,
|
||||
.cpu_port = 8, /* TODO: ports 4, 5, 8 */
|
||||
.vta_regs = B53_VTA_REGS,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
|
||||
@ -2553,7 +2549,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 1024,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT,
|
||||
.vta_regs = B53_VTA_REGS,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
|
||||
@ -2567,7 +2562,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
||||
.arl_bins = 4,
|
||||
.arl_buckets = 256,
|
||||
.imp_port = 8,
|
||||
.cpu_port = B53_CPU_PORT,
|
||||
.vta_regs = B53_VTA_REGS,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
|
||||
@ -2593,7 +2587,6 @@ static int b53_switch_init(struct b53_device *dev)
|
||||
dev->vta_regs[2] = chip->vta_regs[2];
|
||||
dev->jumbo_pm_reg = chip->jumbo_pm_reg;
|
||||
dev->imp_port = chip->imp_port;
|
||||
dev->cpu_port = chip->cpu_port;
|
||||
dev->num_vlans = chip->vlans;
|
||||
dev->num_arl_bins = chip->arl_bins;
|
||||
dev->num_arl_buckets = chip->arl_buckets;
|
||||
@ -2625,16 +2618,8 @@ static int b53_switch_init(struct b53_device *dev)
|
||||
break;
|
||||
#endif
|
||||
}
|
||||
} else if (dev->chip_id == BCM53115_DEVICE_ID) {
|
||||
u64 strap_value;
|
||||
|
||||
b53_read48(dev, B53_STAT_PAGE, B53_STRAP_VALUE, &strap_value);
|
||||
/* use second IMP port if GMII is enabled */
|
||||
if (strap_value & SV_GMII_CTRL_115)
|
||||
dev->cpu_port = 5;
|
||||
}
|
||||
|
||||
dev->enabled_ports |= BIT(dev->cpu_port);
|
||||
dev->num_ports = fls(dev->enabled_ports);
|
||||
|
||||
dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS);
|
||||
@ -2705,6 +2690,7 @@ struct b53_device *b53_switch_alloc(struct device *base,
|
||||
|
||||
mutex_init(&dev->reg_mutex);
|
||||
mutex_init(&dev->stats_mutex);
|
||||
mutex_init(&dev->arl_mutex);
|
||||
|
||||
return dev;
|
||||
}
|
||||
|
@ -107,6 +107,7 @@ struct b53_device {
|
||||
|
||||
struct mutex reg_mutex;
|
||||
struct mutex stats_mutex;
|
||||
struct mutex arl_mutex;
|
||||
const struct b53_io_ops *ops;
|
||||
|
||||
/* chip specific data */
|
||||
@ -124,7 +125,6 @@ struct b53_device {
|
||||
/* used ports mask */
|
||||
u16 enabled_ports;
|
||||
unsigned int imp_port;
|
||||
unsigned int cpu_port;
|
||||
|
||||
/* connect specific data */
|
||||
u8 current_page;
|
||||
@ -324,8 +324,9 @@ void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
|
||||
void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
|
||||
int b53_get_sset_count(struct dsa_switch *ds, int port, int sset);
|
||||
void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data);
|
||||
int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge);
|
||||
void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge);
|
||||
int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
|
||||
bool *tx_fwd_offload);
|
||||
void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge);
|
||||
void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
|
||||
void b53_br_fast_age(struct dsa_switch *ds, int port);
|
||||
int b53_br_flags_pre(struct dsa_switch *ds, int port,
|
||||
|
@ -674,7 +674,8 @@ static int hellcreek_bridge_flags(struct dsa_switch *ds, int port,
|
||||
}
|
||||
|
||||
static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port,
|
||||
struct net_device *br)
|
||||
struct dsa_bridge bridge,
|
||||
bool *tx_fwd_offload)
|
||||
{
|
||||
struct hellcreek *hellcreek = ds->priv;
|
||||
|
||||
@ -691,7 +692,7 @@ static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port,
|
||||
}
|
||||
|
||||
static void hellcreek_port_bridge_leave(struct dsa_switch *ds, int port,
|
||||
struct net_device *br)
|
||||
struct dsa_bridge bridge)
|
||||
{
|
||||
struct hellcreek *hellcreek = ds->priv;
|
||||
|
||||
@ -1457,14 +1458,19 @@ static void hellcreek_teardown(struct dsa_switch *ds)
|
||||
dsa_devlink_resources_unregister(ds);
|
||||
}
|
||||
|
||||
static void hellcreek_phylink_validate(struct dsa_switch *ds, int port,
|
||||
unsigned long *supported,
|
||||
struct phylink_link_state *state)
|
||||
static void hellcreek_phylink_get_caps(struct dsa_switch *ds, int port,
|
||||
struct phylink_config *config)
|
||||
{
|
||||
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
|
||||
struct hellcreek *hellcreek = ds->priv;
|
||||
|
||||
dev_dbg(hellcreek->dev, "Phylink validate for port %d\n", port);
|
||||
__set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
|
||||
__set_bit(PHY_INTERFACE_MODE_RGMII, config->supported_interfaces);
|
||||
|
||||
/* Include GMII - the hardware does not support this interface
|
||||
* mode, but it's the default interface mode for phylib, so we
|
||||
* need it for compatibility with existing DT.
|
||||
*/
|
||||
__set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces);
|
||||
|
||||
/* The MAC settings are a hardware configuration option and cannot be
|
||||
* changed at run time or by strapping. Therefore the attached PHYs
|
||||
@ -1472,14 +1478,9 @@ static void hellcreek_phylink_validate(struct dsa_switch *ds, int port,
|
||||
* by the hardware.
|
||||
*/
|
||||
if (hellcreek->pdata->is_100_mbits)
|
||||
phylink_set(mask, 100baseT_Full);
|
||||
config->mac_capabilities = MAC_100FD;
|
||||
else
|
||||
phylink_set(mask, 1000baseT_Full);
|
||||
|
||||
bitmap_and(supported, supported, mask,
|
||||
__ETHTOOL_LINK_MODE_MASK_NBITS);
|
||||
bitmap_and(state->advertising, state->advertising, mask,
|
||||
__ETHTOOL_LINK_MODE_MASK_NBITS);
|
||||
config->mac_capabilities = MAC_1000FD;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -1830,7 +1831,7 @@ static const struct dsa_switch_ops hellcreek_ds_ops = {
|
||||
.get_strings = hellcreek_get_strings,
|
||||
.get_tag_protocol = hellcreek_get_tag_protocol,
|
||||
.get_ts_info = hellcreek_get_ts_info,
|
||||
.phylink_validate = hellcreek_phylink_validate,
|
||||
.phylink_get_caps = hellcreek_phylink_get_caps,
|
||||
.port_bridge_flags = hellcreek_bridge_flags,
|
||||
.port_bridge_join = hellcreek_port_bridge_join,
|
||||
.port_bridge_leave = hellcreek_port_bridge_leave,
|
||||
|
@ -52,10 +52,6 @@ static int hellcreek_set_hwtstamp_config(struct hellcreek *hellcreek, int port,
*/
clear_bit_unlock(HELLCREEK_HWTSTAMP_ENABLED, &ps->state);

/* Reserved for future extensions */
if (config->flags)
return -EINVAL;

switch (config->tx_type) {
case HWTSTAMP_TX_ON:
tx_tstamp_enable = true;

@ -21,6 +21,7 @@ config NET_DSA_MSCC_SEVILLE
depends on NET_VENDOR_MICROSEMI
depends on HAS_IOMEM
depends on PTP_1588_CLOCK_OPTIONAL
select MDIO_MSCC_MIIM
select MSCC_OCELOT_SWITCH_LIB
select NET_DSA_TAG_OCELOT_8021Q
select NET_DSA_TAG_OCELOT

@ -21,7 +21,6 @@
|
||||
#include <linux/of_net.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/pcs-lynx.h>
|
||||
#include <net/pkt_sched.h>
|
||||
#include <net/dsa.h>
|
||||
#include "felix.h"
|
||||
@ -240,24 +239,32 @@ static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
|
||||
*/
|
||||
static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port)
|
||||
{
|
||||
mutex_lock(&ocelot->fwd_domain_lock);
|
||||
|
||||
ocelot->ports[port]->is_dsa_8021q_cpu = true;
|
||||
ocelot->npi = -1;
|
||||
|
||||
/* Overwrite PGID_CPU with the non-tagging port */
|
||||
ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU);
|
||||
|
||||
ocelot_apply_bridge_fwd_mask(ocelot);
|
||||
ocelot_apply_bridge_fwd_mask(ocelot, true);
|
||||
|
||||
mutex_unlock(&ocelot->fwd_domain_lock);
|
||||
}
|
||||
|
||||
static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
|
||||
{
|
||||
mutex_lock(&ocelot->fwd_domain_lock);
|
||||
|
||||
ocelot->ports[port]->is_dsa_8021q_cpu = false;
|
||||
|
||||
/* Restore PGID_CPU */
|
||||
ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID,
|
||||
PGID_CPU);
|
||||
|
||||
ocelot_apply_bridge_fwd_mask(ocelot);
|
||||
ocelot_apply_bridge_fwd_mask(ocelot, true);
|
||||
|
||||
mutex_unlock(&ocelot->fwd_domain_lock);
|
||||
}
|
||||
|
||||
/* Set up a VCAP IS2 rule for delivering PTP frames to the CPU port module.
|
||||
@ -632,6 +639,17 @@ static int felix_set_ageing_time(struct dsa_switch *ds,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void felix_port_fast_age(struct dsa_switch *ds, int port)
|
||||
{
|
||||
struct ocelot *ocelot = ds->priv;
|
||||
int err;
|
||||
|
||||
err = ocelot_mact_flush(ocelot, port);
|
||||
if (err)
|
||||
dev_err(ds->dev, "Flushing MAC table on port %d returned %pe\n",
|
||||
port, ERR_PTR(err));
|
||||
}
|
||||
|
||||
static int felix_fdb_dump(struct dsa_switch *ds, int port,
|
||||
dsa_fdb_dump_cb_t *cb, void *data)
|
||||
{
|
||||
@ -701,21 +719,21 @@ static int felix_bridge_flags(struct dsa_switch *ds, int port,
|
||||
}
|
||||
|
||||
static int felix_bridge_join(struct dsa_switch *ds, int port,
|
||||
struct net_device *br)
|
||||
struct dsa_bridge bridge, bool *tx_fwd_offload)
|
||||
{
|
||||
struct ocelot *ocelot = ds->priv;
|
||||
|
||||
ocelot_port_bridge_join(ocelot, port, br);
|
||||
ocelot_port_bridge_join(ocelot, port, bridge.dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void felix_bridge_leave(struct dsa_switch *ds, int port,
|
||||
struct net_device *br)
|
||||
struct dsa_bridge bridge)
|
||||
{
|
||||
struct ocelot *ocelot = ds->priv;
|
||||
|
||||
ocelot_port_bridge_leave(ocelot, port, br);
|
||||
ocelot_port_bridge_leave(ocelot, port, bridge.dev);
|
||||
}
|
||||
|
||||
static int felix_lag_join(struct dsa_switch *ds, int port,
|
||||
@ -823,8 +841,8 @@ static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
|
||||
struct felix *felix = ocelot_to_felix(ocelot);
|
||||
struct dsa_port *dp = dsa_to_port(ds, port);
|
||||
|
||||
if (felix->pcs[port])
|
||||
phylink_set_pcs(dp->pl, &felix->pcs[port]->pcs);
|
||||
if (felix->pcs && felix->pcs[port])
|
||||
phylink_set_pcs(dp->pl, felix->pcs[port]);
|
||||
}
|
||||
|
||||
static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
|
||||
@ -961,8 +979,10 @@ static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
|
||||
switch_node = dev->of_node;
|
||||
|
||||
ports_node = of_get_child_by_name(switch_node, "ports");
|
||||
if (!ports_node)
|
||||
ports_node = of_get_child_by_name(switch_node, "ethernet-ports");
|
||||
if (!ports_node) {
|
||||
dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
|
||||
dev_err(dev, "Incorrect bindings: absent \"ports\" or \"ethernet-ports\" node\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
@ -990,6 +1010,10 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
|
||||
ocelot->num_stats = felix->info->num_stats;
|
||||
ocelot->num_mact_rows = felix->info->num_mact_rows;
|
||||
ocelot->vcap = felix->info->vcap;
|
||||
ocelot->vcap_pol.base = felix->info->vcap_pol_base;
|
||||
ocelot->vcap_pol.max = felix->info->vcap_pol_max;
|
||||
ocelot->vcap_pol.base2 = felix->info->vcap_pol_base2;
|
||||
ocelot->vcap_pol.max2 = felix->info->vcap_pol_max2;
|
||||
ocelot->ops = felix->info->ops;
|
||||
ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT;
|
||||
ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
|
||||
@ -1017,7 +1041,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
|
||||
res.start += felix->switch_base;
|
||||
res.end += felix->switch_base;
|
||||
|
||||
target = ocelot_regmap_init(ocelot, &res);
|
||||
target = felix->info->init_regmap(ocelot, &res);
|
||||
if (IS_ERR(target)) {
|
||||
dev_err(ocelot->dev,
|
||||
"Failed to map device memory space\n");
|
||||
@ -1054,7 +1078,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
|
||||
res.start += felix->switch_base;
|
||||
res.end += felix->switch_base;
|
||||
|
||||
target = ocelot_regmap_init(ocelot, &res);
|
||||
target = felix->info->init_regmap(ocelot, &res);
|
||||
if (IS_ERR(target)) {
|
||||
dev_err(ocelot->dev,
|
||||
"Failed to map memory space for port %d\n",
|
||||
@ -1141,38 +1165,22 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
|
||||
kfree(xmit_work);
|
||||
}
|
||||
|
||||
static int felix_port_setup_tagger_data(struct dsa_switch *ds, int port)
|
||||
static int felix_connect_tag_protocol(struct dsa_switch *ds,
|
||||
enum dsa_tag_protocol proto)
|
||||
{
|
||||
struct dsa_port *dp = dsa_to_port(ds, port);
|
||||
struct ocelot *ocelot = ds->priv;
|
||||
struct felix *felix = ocelot_to_felix(ocelot);
|
||||
struct felix_port *felix_port;
|
||||
struct ocelot_8021q_tagger_data *tagger_data;
|
||||
|
||||
if (!dsa_port_is_user(dp))
|
||||
switch (proto) {
|
||||
case DSA_TAG_PROTO_OCELOT_8021Q:
|
||||
tagger_data = ocelot_8021q_tagger_data(ds);
|
||||
tagger_data->xmit_work_fn = felix_port_deferred_xmit;
|
||||
return 0;
|
||||
|
||||
felix_port = kzalloc(sizeof(*felix_port), GFP_KERNEL);
|
||||
if (!felix_port)
|
||||
return -ENOMEM;
|
||||
|
||||
felix_port->xmit_worker = felix->xmit_worker;
|
||||
felix_port->xmit_work_fn = felix_port_deferred_xmit;
|
||||
|
||||
dp->priv = felix_port;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void felix_port_teardown_tagger_data(struct dsa_switch *ds, int port)
|
||||
{
|
||||
struct dsa_port *dp = dsa_to_port(ds, port);
|
||||
struct felix_port *felix_port = dp->priv;
|
||||
|
||||
if (!felix_port)
|
||||
return;
|
||||
|
||||
dp->priv = NULL;
|
||||
kfree(felix_port);
|
||||
case DSA_TAG_PROTO_OCELOT:
|
||||
case DSA_TAG_PROTO_SEVILLE:
|
||||
return 0;
|
||||
default:
|
||||
return -EPROTONOSUPPORT;
|
||||
}
|
||||
}
|
||||
|
||||
/* Hardware initialization done here so that we can allocate structures with
|
||||
@ -1203,12 +1211,6 @@ static int felix_setup(struct dsa_switch *ds)
|
||||
}
|
||||
}
|
||||
|
||||
felix->xmit_worker = kthread_create_worker(0, "felix_xmit");
|
||||
if (IS_ERR(felix->xmit_worker)) {
|
||||
err = PTR_ERR(felix->xmit_worker);
|
||||
goto out_deinit_timestamp;
|
||||
}
|
||||
|
||||
for (port = 0; port < ds->num_ports; port++) {
|
||||
if (dsa_is_unused_port(ds, port))
|
||||
continue;
|
||||
@ -1219,14 +1221,6 @@ static int felix_setup(struct dsa_switch *ds)
|
||||
* bits of vlan tag.
|
||||
*/
|
||||
felix_port_qos_map_init(ocelot, port);
|
||||
|
||||
err = felix_port_setup_tagger_data(ds, port);
|
||||
if (err) {
|
||||
dev_err(ds->dev,
|
||||
"port %d failed to set up tagger data: %pe\n",
|
||||
port, ERR_PTR(err));
|
||||
goto out_deinit_ports;
|
||||
}
|
||||
}
|
||||
|
||||
err = ocelot_devlink_sb_register(ocelot);
|
||||
@ -1254,13 +1248,9 @@ static int felix_setup(struct dsa_switch *ds)
|
||||
if (dsa_is_unused_port(ds, port))
|
||||
continue;
|
||||
|
||||
felix_port_teardown_tagger_data(ds, port);
|
||||
ocelot_deinit_port(ocelot, port);
|
||||
}
|
||||
|
||||
kthread_destroy_worker(felix->xmit_worker);
|
||||
|
||||
out_deinit_timestamp:
|
||||
ocelot_deinit_timestamp(ocelot);
|
||||
ocelot_deinit(ocelot);
|
||||
|
||||
@ -1289,12 +1279,9 @@ static void felix_teardown(struct dsa_switch *ds)
|
||||
if (dsa_is_unused_port(ds, port))
|
||||
continue;
|
||||
|
||||
felix_port_teardown_tagger_data(ds, port);
|
||||
ocelot_deinit_port(ocelot, port);
|
||||
}
|
||||
|
||||
kthread_destroy_worker(felix->xmit_worker);
|
||||
|
||||
ocelot_devlink_sb_unregister(ocelot);
|
||||
ocelot_deinit_timestamp(ocelot);
|
||||
ocelot_deinit(ocelot);
|
||||
@ -1634,6 +1621,7 @@ felix_mrp_del_ring_role(struct dsa_switch *ds, int port,
|
||||
const struct dsa_switch_ops felix_switch_ops = {
|
||||
.get_tag_protocol = felix_get_tag_protocol,
|
||||
.change_tag_protocol = felix_change_tag_protocol,
|
||||
.connect_tag_protocol = felix_connect_tag_protocol,
|
||||
.setup = felix_setup,
|
||||
.teardown = felix_teardown,
|
||||
.set_ageing_time = felix_set_ageing_time,
|
||||
@ -1645,6 +1633,7 @@ const struct dsa_switch_ops felix_switch_ops = {
|
||||
.phylink_mac_config = felix_phylink_mac_config,
|
||||
.phylink_mac_link_down = felix_phylink_mac_link_down,
|
||||
.phylink_mac_link_up = felix_phylink_mac_link_up,
|
||||
.port_fast_age = felix_port_fast_age,
|
||||
.port_fdb_dump = felix_fdb_dump,
|
||||
.port_fdb_add = felix_fdb_add,
|
||||
.port_fdb_del = felix_fdb_del,
|
||||
|
@ -21,8 +21,10 @@ struct felix_info {
|
||||
int num_ports;
|
||||
int num_tx_queues;
|
||||
struct vcap_props *vcap;
|
||||
int switch_pci_bar;
|
||||
int imdio_pci_bar;
|
||||
u16 vcap_pol_base;
|
||||
u16 vcap_pol_max;
|
||||
u16 vcap_pol_base2;
|
||||
u16 vcap_pol_max2;
|
||||
const struct ptp_clock_info *ptp_caps;
|
||||
|
||||
/* Some Ocelot switches are integrated into the SoC without the
|
||||
@ -48,6 +50,8 @@ struct felix_info {
|
||||
enum tc_setup_type type, void *type_data);
|
||||
void (*port_sched_speed_set)(struct ocelot *ocelot, int port,
|
||||
u32 speed);
|
||||
struct regmap *(*init_regmap)(struct ocelot *ocelot,
|
||||
struct resource *res);
|
||||
};
|
||||
|
||||
extern const struct dsa_switch_ops felix_switch_ops;
|
||||
@ -58,7 +62,7 @@ struct felix {
|
||||
const struct felix_info *info;
|
||||
struct ocelot ocelot;
|
||||
struct mii_bus *imdio;
|
||||
struct lynx_pcs **pcs;
|
||||
struct phylink_pcs **pcs;
|
||||
resource_size_t switch_base;
|
||||
resource_size_t imdio_base;
|
||||
enum dsa_tag_protocol tag_proto;
|
||||
|
File diff suppressed because it is too large
@ -6,19 +6,18 @@
|
||||
#include <soc/mscc/ocelot_vcap.h>
|
||||
#include <soc/mscc/ocelot_sys.h>
|
||||
#include <soc/mscc/ocelot.h>
|
||||
#include <linux/mdio/mdio-mscc-miim.h>
|
||||
#include <linux/of_mdio.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/pcs-lynx.h>
|
||||
#include <linux/dsa/ocelot.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/of_mdio.h>
|
||||
#include "felix.h"
|
||||
|
||||
#define MSCC_MIIM_CMD_OPR_WRITE BIT(1)
|
||||
#define MSCC_MIIM_CMD_OPR_READ BIT(2)
|
||||
#define MSCC_MIIM_CMD_WRDATA_SHIFT 4
|
||||
#define MSCC_MIIM_CMD_REGAD_SHIFT 20
|
||||
#define MSCC_MIIM_CMD_PHYAD_SHIFT 25
|
||||
#define MSCC_MIIM_CMD_VLD BIT(31)
|
||||
#define VSC9953_VCAP_POLICER_BASE 11
|
||||
#define VSC9953_VCAP_POLICER_MAX 31
|
||||
#define VSC9953_VCAP_POLICER_BASE2 120
|
||||
#define VSC9953_VCAP_POLICER_MAX2 161
|
||||
|
||||
static const u32 vsc9953_ana_regmap[] = {
|
||||
REG(ANA_ADVLEARN, 0x00b500),
|
||||
@ -858,7 +857,6 @@ static struct vcap_props vsc9953_vcap_props[] = {
|
||||
#define VSC9953_INIT_TIMEOUT 50000
|
||||
#define VSC9953_GCB_RST_SLEEP 100
|
||||
#define VSC9953_SYS_RAMINIT_SLEEP 80
|
||||
#define VCS9953_MII_TIMEOUT 10000
|
||||
|
||||
static int vsc9953_gcb_soft_rst_status(struct ocelot *ocelot)
|
||||
{
|
||||
@ -878,82 +876,6 @@ static int vsc9953_sys_ram_init_status(struct ocelot *ocelot)
|
||||
return val;
|
||||
}
|
||||
|
||||
static int vsc9953_gcb_miim_pending_status(struct ocelot *ocelot)
|
||||
{
|
||||
int val;
|
||||
|
||||
ocelot_field_read(ocelot, GCB_MIIM_MII_STATUS_PENDING, &val);
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
static int vsc9953_gcb_miim_busy_status(struct ocelot *ocelot)
|
||||
{
|
||||
int val;
|
||||
|
||||
ocelot_field_read(ocelot, GCB_MIIM_MII_STATUS_BUSY, &val);
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
static int vsc9953_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
|
||||
u16 value)
|
||||
{
|
||||
struct ocelot *ocelot = bus->priv;
|
||||
int err, cmd, val;
|
||||
|
||||
/* Wait while MIIM controller becomes idle */
|
||||
err = readx_poll_timeout(vsc9953_gcb_miim_pending_status, ocelot,
|
||||
val, !val, 10, VCS9953_MII_TIMEOUT);
|
||||
if (err) {
|
||||
dev_err(ocelot->dev, "MDIO write: pending timeout\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
cmd = MSCC_MIIM_CMD_VLD | (phy_id << MSCC_MIIM_CMD_PHYAD_SHIFT) |
|
||||
(regnum << MSCC_MIIM_CMD_REGAD_SHIFT) |
|
||||
(value << MSCC_MIIM_CMD_WRDATA_SHIFT) |
|
||||
MSCC_MIIM_CMD_OPR_WRITE;
|
||||
|
||||
ocelot_write(ocelot, cmd, GCB_MIIM_MII_CMD);
|
||||
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int vsc9953_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
|
||||
{
|
||||
struct ocelot *ocelot = bus->priv;
|
||||
int err, cmd, val;
|
||||
|
||||
/* Wait until MIIM controller becomes idle */
|
||||
err = readx_poll_timeout(vsc9953_gcb_miim_pending_status, ocelot,
|
||||
val, !val, 10, VCS9953_MII_TIMEOUT);
|
||||
if (err) {
|
||||
dev_err(ocelot->dev, "MDIO read: pending timeout\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Write the MIIM COMMAND register */
|
||||
cmd = MSCC_MIIM_CMD_VLD | (phy_id << MSCC_MIIM_CMD_PHYAD_SHIFT) |
|
||||
(regnum << MSCC_MIIM_CMD_REGAD_SHIFT) | MSCC_MIIM_CMD_OPR_READ;
|
||||
|
||||
ocelot_write(ocelot, cmd, GCB_MIIM_MII_CMD);
|
||||
|
||||
/* Wait while read operation via the MIIM controller is in progress */
|
||||
err = readx_poll_timeout(vsc9953_gcb_miim_busy_status, ocelot,
|
||||
val, !val, 10, VCS9953_MII_TIMEOUT);
|
||||
if (err) {
|
||||
dev_err(ocelot->dev, "MDIO read: busy timeout\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
val = ocelot_read(ocelot, GCB_MIIM_MII_DATA);
|
||||
|
||||
err = val & 0xFFFF;
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
/* CORE_ENA is in SYS:SYSTEM:RESET_CFG
|
||||
* MEM_INIT is in SYS:SYSTEM:RESET_CFG
|
||||
@ -1000,7 +922,7 @@ static void vsc9953_phylink_validate(struct ocelot *ocelot, int port,
|
||||
|
||||
if (state->interface != PHY_INTERFACE_MODE_NA &&
|
||||
state->interface != ocelot_port->phy_mode) {
|
||||
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
|
||||
linkmode_zero(supported);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -1019,10 +941,8 @@ static void vsc9953_phylink_validate(struct ocelot *ocelot, int port,
|
||||
phylink_set(mask, 2500baseX_Full);
|
||||
}
|
||||
|
||||
bitmap_and(supported, supported, mask,
|
||||
__ETHTOOL_LINK_MODE_MASK_NBITS);
|
||||
bitmap_and(state->advertising, state->advertising, mask,
|
||||
__ETHTOOL_LINK_MODE_MASK_NBITS);
|
||||
linkmode_and(supported, supported, mask);
|
||||
linkmode_and(state->advertising, state->advertising, mask);
|
||||
}
|
||||
|
||||
static int vsc9953_prevalidate_phy_mode(struct ocelot *ocelot, int port,
|
||||
@ -1092,23 +1012,21 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
|
||||
int rc;
|
||||
|
||||
felix->pcs = devm_kcalloc(dev, felix->info->num_ports,
sizeof(struct phy_device *),
sizeof(struct phylink_pcs *),
GFP_KERNEL);
if (!felix->pcs) {
dev_err(dev, "failed to allocate array for PCS PHYs\n");
return -ENOMEM;
}

bus = devm_mdiobus_alloc(dev);
if (!bus)
return -ENOMEM;
rc = mscc_miim_setup(dev, &bus, "VSC9953 internal MDIO bus",
ocelot->targets[GCB],
ocelot->map[GCB][GCB_MIIM_MII_STATUS & REG_MASK]);

bus->name = "VSC9953 internal MDIO bus";
bus->read = vsc9953_mdio_read;
bus->write = vsc9953_mdio_write;
bus->parent = dev;
bus->priv = ocelot;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev));
if (rc) {
dev_err(dev, "failed to setup MDIO bus\n");
return rc;
}

/* Needed in order to initialize the bus mutex lock */
rc = devm_of_mdiobus_register(dev, bus, NULL);
@ -1121,9 +1039,9 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)

for (port = 0; port < felix->info->num_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct phylink_pcs *phylink_pcs;
struct mdio_device *mdio_device;
int addr = port + 4;
struct mdio_device *pcs;
struct lynx_pcs *lynx;

if (dsa_is_unused_port(felix->ds, port))
continue;
@ -1131,17 +1049,17 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL)
continue;

pcs = mdio_device_create(felix->imdio, addr);
if (IS_ERR(pcs))
mdio_device = mdio_device_create(felix->imdio, addr);
if (IS_ERR(mdio_device))
continue;

lynx = lynx_pcs_create(pcs);
if (!lynx) {
mdio_device_free(pcs);
phylink_pcs = lynx_pcs_create(mdio_device);
if (!phylink_pcs) {
mdio_device_free(mdio_device);
continue;
}

felix->pcs[port] = lynx;
felix->pcs[port] = phylink_pcs;

dev_info(dev, "Found PCS at internal MDIO address %d\n", addr);
}
@ -1155,13 +1073,15 @@ static void vsc9953_mdio_bus_free(struct ocelot *ocelot)
int port;

for (port = 0; port < ocelot->num_phys_ports; port++) {
struct lynx_pcs *pcs = felix->pcs[port];
struct phylink_pcs *phylink_pcs = felix->pcs[port];
struct mdio_device *mdio_device;

if (!pcs)
if (!phylink_pcs)
continue;

mdio_device_free(pcs->mdio);
lynx_pcs_destroy(pcs);
mdio_device = lynx_get_mdio_device(phylink_pcs);
mdio_device_free(mdio_device);
lynx_pcs_destroy(phylink_pcs);
}

/* mdiobus_unregister and mdiobus_free handled by devres */
@ -1176,6 +1096,10 @@ static const struct felix_info seville_info_vsc9953 = {
.stats_layout = vsc9953_stats_layout,
.num_stats = ARRAY_SIZE(vsc9953_stats_layout),
.vcap = vsc9953_vcap_props,
.vcap_pol_base = VSC9953_VCAP_POLICER_BASE,
.vcap_pol_max = VSC9953_VCAP_POLICER_MAX,
.vcap_pol_base2 = VSC9953_VCAP_POLICER_BASE2,
.vcap_pol_max2 = VSC9953_VCAP_POLICER_MAX2,
.num_mact_rows = 2048,
.num_ports = 10,
.num_tx_queues = OCELOT_NUM_TC,
@ -1183,6 +1107,7 @@ static const struct felix_info seville_info_vsc9953 = {
.mdio_bus_free = vsc9953_mdio_bus_free,
.phylink_validate = vsc9953_phylink_validate,
.prevalidate_phy_mode = vsc9953_prevalidate_phy_mode,
.init_regmap = ocelot_regmap_init,
};

static int seville_probe(struct platform_device *pdev)
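For context, the hunks above converge on a single create/teardown pairing for the PCS objects. A minimal sketch of that pairing, using only helpers already named in the diff (mdio_device_create(), lynx_pcs_create(), lynx_get_mdio_device(), lynx_pcs_destroy()); the two wrapper functions themselves are hypothetical:

static struct phylink_pcs *example_pcs_get(struct mii_bus *bus, int addr)
{
	struct mdio_device *mdio_device;
	struct phylink_pcs *pcs;

	mdio_device = mdio_device_create(bus, addr);
	if (IS_ERR(mdio_device))
		return NULL;

	pcs = lynx_pcs_create(mdio_device);	/* wraps the mdio_device */
	if (!pcs)
		mdio_device_free(mdio_device);

	return pcs;
}

static void example_pcs_put(struct phylink_pcs *pcs)
{
	/* The mdio_device is no longer stored separately; recover it from
	 * the PCS before destroying the PCS itself, as the free hunk does.
	 */
	mdio_device_free(lynx_get_mdio_device(pcs));
	lynx_pcs_destroy(pcs);
}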
@ -522,7 +522,7 @@ static void ar9331_sw_phylink_validate(struct dsa_switch *ds, int port,
goto unsupported;
break;
default:
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
linkmode_zero(supported);
dev_err(ds->dev, "Unsupported port: %i\n", port);
return;
}
@ -536,15 +536,13 @@ static void ar9331_sw_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, 100baseT_Half);
phylink_set(mask, 100baseT_Full);

bitmap_and(supported, supported, mask,
__ETHTOOL_LINK_MODE_MASK_NBITS);
bitmap_and(state->advertising, state->advertising, mask,
__ETHTOOL_LINK_MODE_MASK_NBITS);
linkmode_and(supported, supported, mask);
linkmode_and(state->advertising, state->advertising, mask);

return;

unsupported:
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
linkmode_zero(supported);
dev_err(ds->dev, "Unsupported interface: %d, port: %d\n",
state->interface, port);
}
@ -5,6 +5,7 @@
*/

#include <net/dsa.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/of_device.h>
#include <linux/netdev_features.h>
@ -456,7 +457,7 @@ static void xrs700x_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, 1000baseT_Full);
break;
default:
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
linkmode_zero(supported);
dev_err(ds->dev, "Unsupported port: %i\n", port);
return;
}
@ -467,10 +468,8 @@ static void xrs700x_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, 10baseT_Full);
phylink_set(mask, 100baseT_Full);

bitmap_and(supported, supported, mask,
__ETHTOOL_LINK_MODE_MASK_NBITS);
bitmap_and(state->advertising, state->advertising, mask,
__ETHTOOL_LINK_MODE_MASK_NBITS);
linkmode_and(supported, supported, mask);
linkmode_and(state->advertising, state->advertising, mask);
}

static void xrs700x_mac_link_up(struct dsa_switch *ds, int port,
@ -503,7 +502,7 @@ static void xrs700x_mac_link_up(struct dsa_switch *ds, int port,
}

static int xrs700x_bridge_common(struct dsa_switch *ds, int port,
struct net_device *bridge, bool join)
struct dsa_bridge bridge, bool join)
{
unsigned int i, cpu_mask = 0, mask = 0;
struct xrs700x *priv = ds->priv;
@ -515,14 +514,14 @@ static int xrs700x_bridge_common(struct dsa_switch *ds, int port,

cpu_mask |= BIT(i);

if (dsa_to_port(ds, i)->bridge_dev == bridge)
if (dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;

mask |= BIT(i);
}

for (i = 0; i < ds->num_ports; i++) {
if (dsa_to_port(ds, i)->bridge_dev != bridge)
if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;

/* 1 = Disable forwarding to the port */
@ -542,13 +541,13 @@ static int xrs700x_bridge_common(struct dsa_switch *ds, int port,
}

static int xrs700x_bridge_join(struct dsa_switch *ds, int port,
struct net_device *bridge)
struct dsa_bridge bridge, bool *tx_fwd_offload)
{
return xrs700x_bridge_common(ds, port, bridge, true);
}

static void xrs700x_bridge_leave(struct dsa_switch *ds, int port,
struct net_device *bridge)
struct dsa_bridge bridge)
{
xrs700x_bridge_common(ds, port, bridge, false);
}
@ -31,7 +31,7 @@ static int xrs700x_mdio_reg_read(void *context, unsigned int reg,

uval = (u16)FIELD_GET(GENMASK(31, 16), reg);

ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA1, uval);
ret = mdiodev_write(mdiodev, XRS_MDIO_IBA1, uval);
if (ret < 0) {
dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
return ret;
@ -39,13 +39,13 @@ static int xrs700x_mdio_reg_read(void *context, unsigned int reg,

uval = (u16)((reg & GENMASK(15, 1)) | XRS_IB_READ);

ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA0, uval);
ret = mdiodev_write(mdiodev, XRS_MDIO_IBA0, uval);
if (ret < 0) {
dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
return ret;
}

ret = mdiobus_read(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBD);
ret = mdiodev_read(mdiodev, XRS_MDIO_IBD);
if (ret < 0) {
dev_err(dev, "xrs mdiobus_read returned %d\n", ret);
return ret;
@ -64,7 +64,7 @@ static int xrs700x_mdio_reg_write(void *context, unsigned int reg,
u16 uval;
int ret;

ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBD, (u16)val);
ret = mdiodev_write(mdiodev, XRS_MDIO_IBD, (u16)val);
if (ret < 0) {
dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
return ret;
@ -72,7 +72,7 @@ static int xrs700x_mdio_reg_write(void *context, unsigned int reg,

uval = (u16)FIELD_GET(GENMASK(31, 16), reg);

ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA1, uval);
ret = mdiodev_write(mdiodev, XRS_MDIO_IBA1, uval);
if (ret < 0) {
dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
return ret;
@ -80,7 +80,7 @@ static int xrs700x_mdio_reg_write(void *context, unsigned int reg,

uval = (u16)((reg & GENMASK(15, 1)) | XRS_IB_WRITE);

ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA0, uval);
ret = mdiodev_write(mdiodev, XRS_MDIO_IBA0, uval);
if (ret < 0) {
dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
return ret;
@ -1028,7 +1028,7 @@ static void dfx_bus_config_check(DFX_board_t *bp)
* or read adapter MAC address
*
* Assumptions:
* Memory allocated from pci_alloc_consistent() call is physically
* Memory allocated from dma_alloc_coherent() call is physically
* contiguous, locked memory.
*
* Side Effects:
@ -1117,7 +1117,7 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
* dfx_ctl_set_mac_address.
*/

memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
dev_addr_set(dev, bp->factory_mac_addr);
if (dfx_bus_tc)
board_name = "DEFTA";
if (dfx_bus_eisa)
@ -1474,7 +1474,7 @@ static int dfx_open(struct net_device *dev)
* address.
*/

memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
dev_addr_set(dev, bp->factory_mac_addr);

/* Clear local unicast/multicast address tables and counts */

@ -2379,7 +2379,7 @@ static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr)

/* Copy unicast address to driver-maintained structs and update count */

memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN); /* update device struct */
dev_addr_set(dev, p_sockaddr->sa_data); /* update device struct */
memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN); /* update driver struct */
bp->uc_count = 1;

@ -3249,7 +3249,7 @@ static void dfx_rcv_queue_process(
* is contained in a single physically contiguous buffer
* in which the virtual address of the start of packet
* (skb->data) can be converted to a physical address
* by using pci_map_single().
* by using dma_map_single().
*
* Since the adapter architecture requires a three byte
* packet request header to prepend the start of packet,
@ -3402,7 +3402,7 @@ static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
* skb->data.
* 6. The physical address of the start of packet
* can be determined from the virtual address
* by using pci_map_single() and is only 32-bits
* by using dma_map_single() and is only 32-bits
* wide.
*/
@ -1380,7 +1380,7 @@ static int fza_probe(struct device *bdev)
goto err_out_irq;

fza_reads(&init->hw_addr, &hw_addr, sizeof(hw_addr));
memcpy(dev->dev_addr, &hw_addr, FDDI_K_ALEN);
dev_addr_set(dev, (u8 *)&hw_addr);

fza_reads(&init->rom_rev, &rom_rev, sizeof(rom_rev));
fza_reads(&init->fw_rev, &fw_rev, sizeof(fw_rev));
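The FDDI and AX.25 conversions in this series all follow the same shape: drivers stop memcpy()ing into dev->dev_addr and instead go through the address helpers. A hedged illustration with a hypothetical driver callback (dev_addr_set() and eth_hw_addr_set() are the helpers used in the hunks; dev_addr_set() copies dev->addr_len bytes, eth_hw_addr_set() an Ethernet-sized address):

static int example_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;

	/* Never write dev->dev_addr directly; let the core update it. */
	dev_addr_set(dev, sa->sa_data);

	/* Ethernet-sized addresses could use
	 * eth_hw_addr_set(dev, sa->sa_data) instead.
	 */
	return 0;
}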
@ -470,7 +470,7 @@ void card_stop(struct s_smc *smc);
void init_board(struct s_smc *smc, u_char *mac_addr);
int init_fplus(struct s_smc *smc);
void init_plc(struct s_smc *smc);
int init_smt(struct s_smc *smc, u_char *mac_addr);
int init_smt(struct s_smc *smc, const u_char *mac_addr);
void mac1_irq(struct s_smc *smc, u_short stu, u_short stl);
void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l);
void mac3_irq(struct s_smc *smc, u_short code_s3u, u_short code_s3l);
@ -38,10 +38,10 @@
-------------------------------------------------------------
*/
#ifdef COMMON_MB_POOL
static SMbuf *mb_start = 0 ;
static SMbuf *mb_free = 0 ;
static SMbuf *mb_start;
static SMbuf *mb_free;
static int mb_init = FALSE ;
static int call_count = 0 ;
static int call_count;
#endif

/*
@ -78,6 +78,7 @@ static const char * const boot_msg =
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/fddidevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
@ -433,7 +434,7 @@ static int skfp_driver_init(struct net_device *dev)
}
read_address(smc, NULL);
pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
eth_hw_addr_set(dev, smc->hw.fddi_canon_addr.a);

smt_reset_defaults(smc, 0);

@ -500,7 +501,7 @@ static int skfp_open(struct net_device *dev)
* address.
*/
read_address(smc, NULL);
memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
eth_hw_addr_set(dev, smc->hw.fddi_canon_addr.a);

init_smt(smc, NULL);
smt_online(smc, 1);
@ -924,7 +925,7 @@ static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr)
unsigned long Flags;


memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);
dev_addr_set(dev, p_sockaddr->sa_data);
spin_lock_irqsave(&bp->DriverLock, Flags);
ResetAdapter(smc);
spin_unlock_irqrestore(&bp->DriverLock, Flags);
@ -1012,7 +1013,7 @@ static int skfp_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __
* is contained in a single physically contiguous buffer
* in which the virtual address of the start of packet
* (skb->data) can be converted to a physical address
* by using pci_map_single().
* by using dma_map_single().
*
* We have an internal queue for packets we can not send
* immediately. Packets in this queue can be given to the
@ -1846,10 +1846,10 @@ void smt_swap_para(struct smt_header *sm, int len, int direction)
}
}


static void smt_string_swap(char *data, const char *format, int len)
{
const char *open_paren = NULL ;
int x ;

while (len > 0 && *format) {
switch (*format) {
@ -1876,19 +1876,13 @@ static void smt_string_swap(char *data, const char *format, int len)
len-- ;
break ;
case 's' :
x = data[0] ;
data[0] = data[1] ;
data[1] = x ;
swap(data[0], data[1]) ;
data += 2 ;
len -= 2 ;
break ;
case 'l' :
x = data[0] ;
data[0] = data[3] ;
data[3] = x ;
x = data[1] ;
data[1] = data[2] ;
data[2] = x ;
swap(data[0], data[3]) ;
swap(data[1], data[2]) ;
data += 4 ;
len -= 4 ;
break ;
@ -19,7 +19,7 @@
#include "h/fddi.h"
#include "h/smc.h"

void init_fddi_driver(struct s_smc *smc, u_char *mac_addr);
void init_fddi_driver(struct s_smc *smc, const u_char *mac_addr);

/* define global debug variable */
#if defined(DEBUG) && !defined(DEBUG_BRD)
@ -57,7 +57,7 @@ static void set_oem_spec_val(struct s_smc *smc)
/*
* Init SMT
*/
int init_smt(struct s_smc *smc, u_char *mac_addr)
int init_smt(struct s_smc *smc, const u_char *mac_addr)
/* u_char *mac_addr; canonical address or NULL */
{
int p ;
@ -288,7 +288,7 @@ static int sp_set_mac_address(struct net_device *dev, void *addr)

netif_tx_lock_bh(dev);
netif_addr_lock(dev);
memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
__dev_addr_set(dev, &sa->sax25_call, AX25_ADDR_LEN);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);

@ -306,7 +306,6 @@ static void sp_setup(struct net_device *dev)
{
/* Finish setting up the DEVICE info. */
dev->netdev_ops = &sp_netdev_ops;
dev->needs_free_netdev = true;
dev->mtu = SIXP_MTU;
dev->hard_header_len = AX25_MAX_HEADER_LEN;
dev->header_ops = &ax25_header_ops;
@ -317,7 +316,7 @@ static void sp_setup(struct net_device *dev)

/* Only activated in AX.25 mode */
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
dev_addr_set(dev, (u8 *)&ax25_defaddr);

dev->flags = 0;
}
@ -669,19 +668,21 @@ static void sixpack_close(struct tty_struct *tty)
*/
netif_stop_queue(sp->dev);

unregister_netdev(sp->dev);

del_timer_sync(&sp->tx_t);
del_timer_sync(&sp->resync_t);

/* Free all 6pack frame buffers. */
/* Free all 6pack frame buffers after unreg. */
kfree(sp->rbuff);
kfree(sp->xbuff);

unregister_netdev(sp->dev);
free_netdev(sp->dev);
}

/* Perform I/O control on an active 6pack channel. */
static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
static int sixpack_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
struct sixpack *sp = sp_get(tty);
struct net_device *dev;
@ -726,13 +727,13 @@ static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
}

netif_tx_lock_bh(dev);
memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN);
__dev_addr_set(dev, &addr, AX25_ADDR_LEN);
netif_tx_unlock_bh(dev);
err = 0;
break;
}
default:
err = tty_mode_ioctl(tty, file, cmd, arg);
err = tty_mode_ioctl(tty, cmd, arg);
}

sp_put(sp);
@ -791,7 +791,7 @@ static int baycom_set_mac_address(struct net_device *dev, void *addr)
struct sockaddr *sa = (struct sockaddr *)addr;

/* addr is an AX.25 shifted ASCII mac address */
memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
dev_addr_set(dev, sa->sa_data);
return 0;
}

@ -1159,7 +1159,7 @@ static void baycom_probe(struct net_device *dev)
dev->mtu = AX25_DEF_PACLEN; /* eth_mtu is the default */
dev->addr_len = AX25_ADDR_LEN; /* sizeof an ax.25 address */
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
memcpy(dev->dev_addr, &null_ax25_address, AX25_ADDR_LEN);
dev_addr_set(dev, (u8 *)&null_ax25_address);
dev->tx_queue_len = 16;

/* New style flags */
@ -302,7 +302,7 @@ static int bpq_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = (struct sockaddr *)addr;

memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
dev_addr_set(dev, sa->sa_data);

return 0;
}
@ -457,9 +457,6 @@ static void bpq_setup(struct net_device *dev)
dev->netdev_ops = &bpq_netdev_ops;
dev->needs_free_netdev = true;

memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);

dev->flags = 0;
dev->features = NETIF_F_LLTX; /* Allow recursion */

@ -472,6 +469,8 @@ static void bpq_setup(struct net_device *dev)
dev->mtu = AX25_DEF_PACLEN;
dev->addr_len = AX25_ADDR_LEN;

memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
dev_addr_set(dev, (u8 *)&ax25_defaddr);
}

/*
@ -426,7 +426,7 @@ static void __init dev_setup(struct net_device *dev)
dev->addr_len = AX25_ADDR_LEN;
dev->tx_queue_len = 64;
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
dev_addr_set(dev, (u8 *)&ax25_defaddr);
}

static const struct net_device_ops scc_netdev_ops = {
@ -956,8 +956,7 @@ static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)

static int scc_set_mac_address(struct net_device *dev, void *sa)
{
memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
dev->addr_len);
dev_addr_set(dev, ((struct sockaddr *)sa)->sa_data);
return 0;
}
@ -30,6 +30,7 @@
/*****************************************************************************/

#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/net.h>
@ -415,7 +416,7 @@ static int hdlcdrv_set_mac_address(struct net_device *dev, void *addr)
struct sockaddr *sa = (struct sockaddr *)addr;

/* addr is an AX.25 shifted ASCII mac address */
memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
dev_addr_set(dev, sa->sa_data);
return 0;
}

@ -675,7 +676,7 @@ static void hdlcdrv_setup(struct net_device *dev)
dev->mtu = AX25_DEF_PACLEN; /* eth_mtu is the default */
dev->addr_len = AX25_ADDR_LEN; /* sizeof an ax.25 address */
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
dev_addr_set(dev, (u8 *)&ax25_defaddr);
dev->tx_queue_len = 16;
}
@ -346,7 +346,7 @@ static int ax_set_mac_address(struct net_device *dev, void *addr)

netif_tx_lock_bh(dev);
netif_addr_lock(dev);
memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
__dev_addr_set(dev, &sa->sax25_call, AX25_ADDR_LEN);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);

@ -649,7 +649,7 @@ static void ax_setup(struct net_device *dev)


memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
dev_addr_set(dev, (u8 *)&ax25_defaddr);

dev->flags = IFF_BROADCAST | IFF_MULTICAST;
}
@ -806,8 +806,8 @@ static void mkiss_close(struct tty_struct *tty)
}

/* Perform I/O control on an active ax25 channel. */
static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
static int mkiss_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
struct mkiss *ax = mkiss_get(tty);
struct net_device *dev;
@ -853,7 +853,7 @@ static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
}

netif_tx_lock_bh(dev);
memcpy(dev->dev_addr, addr, AX25_ADDR_LEN);
__dev_addr_set(dev, addr, AX25_ADDR_LEN);
netif_tx_unlock_bh(dev);

err = 0;
@ -148,6 +148,7 @@

/* ----------------------------------------------------------------------- */

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/signal.h>
@ -1563,9 +1564,6 @@ static void scc_net_setup(struct net_device *dev)
dev->netdev_ops = &scc_netdev_ops;
dev->header_ops = &ax25_header_ops;

memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);

dev->flags = 0;

dev->type = ARPHRD_AX25;
@ -1573,6 +1571,8 @@ static void scc_net_setup(struct net_device *dev)
dev->mtu = AX25_DEF_PACLEN;
dev->addr_len = AX25_ADDR_LEN;

memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
dev_addr_set(dev, (u8 *)&ax25_defaddr);
}

/* ----> open network device <---- */
@ -1951,7 +1951,7 @@ static int scc_net_siocdevprivate(struct net_device *dev,
static int scc_net_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = (struct sockaddr *) addr;
memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
dev_addr_set(dev, sa->sa_data);
return 0;
}
@ -1061,7 +1061,7 @@ static int yam_set_mac_address(struct net_device *dev, void *addr)
struct sockaddr *sa = (struct sockaddr *) addr;

/* addr is an AX.25 shifted ASCII mac address */
memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
dev_addr_set(dev, sa->sa_data);
return 0;
}

@ -1105,7 +1105,7 @@ static void yam_setup(struct net_device *dev)
dev->mtu = AX25_MTU;
dev->addr_len = AX25_ADDR_LEN;
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
dev_addr_set(dev, (u8 *)&ax25_defaddr);
}

static int __init yam_init_driver(void)
@ -502,6 +502,7 @@ static unsigned int write_eeprom(struct rr_private *rrpriv,

static int rr_init(struct net_device *dev)
{
u8 addr[HIPPI_ALEN] __aligned(4);
struct rr_private *rrpriv;
struct rr_regs __iomem *regs;
u32 sram_size, rev;
@ -537,10 +538,11 @@ static int rr_init(struct net_device *dev)
* other method I've seen. -VAL
*/

*(__be16 *)(dev->dev_addr) =
*(__be16 *)(addr) =
htons(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA)));
*(__be32 *)(dev->dev_addr+2) =
*(__be32 *)(addr+2) =
htonl(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA[4])));
dev_addr_set(dev, addr);

printk(" MAC: %pM\n", dev->dev_addr);
@ -164,6 +164,7 @@ struct hv_netvsc_packet {
u32 total_bytes;
u32 send_buf_index;
u32 total_data_buflen;
struct hv_dma_range *dma_range;
};

#define NETVSC_HASH_KEYLEN 40
@ -1074,15 +1075,18 @@ struct netvsc_device {

/* Receive buffer allocated by us but manages by NetVSP */
void *recv_buf;
void *recv_original_buf;
u32 recv_buf_size; /* allocated bytes */
u32 recv_buf_gpadl_handle;
struct vmbus_gpadl recv_buf_gpadl_handle;
u32 recv_section_cnt;
u32 recv_section_size;
u32 recv_completion_cnt;

/* Send buffer allocated by us */
void *send_buf;
u32 send_buf_gpadl_handle;
void *send_original_buf;
u32 send_buf_size;
struct vmbus_gpadl send_buf_gpadl_handle;
u32 send_section_cnt;
u32 send_section_size;
unsigned long *send_section_map;
@ -1730,4 +1734,6 @@ struct rndis_message {
#define RETRY_US_HI 10000
#define RETRY_MAX 2000 /* >10 sec */

void netvsc_dma_unmap(struct hv_device *hv_dev,
struct hv_netvsc_packet *packet);
#endif /* _HYPERV_NET_H */
@ -153,9 +153,22 @@ static void free_netvsc_device(struct rcu_head *head)
int i;

kfree(nvdev->extension);
vfree(nvdev->recv_buf);
vfree(nvdev->send_buf);
kfree(nvdev->send_section_map);

if (nvdev->recv_original_buf) {
hv_unmap_memory(nvdev->recv_buf);
vfree(nvdev->recv_original_buf);
} else {
vfree(nvdev->recv_buf);
}

if (nvdev->send_original_buf) {
hv_unmap_memory(nvdev->send_buf);
vfree(nvdev->send_original_buf);
} else {
vfree(nvdev->send_buf);
}

bitmap_free(nvdev->send_section_map);

for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
@ -278,9 +291,9 @@ static void netvsc_teardown_recv_gpadl(struct hv_device *device,
{
int ret;

if (net_device->recv_buf_gpadl_handle) {
if (net_device->recv_buf_gpadl_handle.gpadl_handle) {
ret = vmbus_teardown_gpadl(device->channel,
net_device->recv_buf_gpadl_handle);
&net_device->recv_buf_gpadl_handle);

/* If we failed here, we might as well return and have a leak
* rather than continue and a bugchk
@ -290,7 +303,6 @@ static void netvsc_teardown_recv_gpadl(struct hv_device *device,
"unable to teardown receive buffer's gpadl\n");
return;
}
net_device->recv_buf_gpadl_handle = 0;
}
}

@ -300,9 +312,9 @@ static void netvsc_teardown_send_gpadl(struct hv_device *device,
{
int ret;

if (net_device->send_buf_gpadl_handle) {
if (net_device->send_buf_gpadl_handle.gpadl_handle) {
ret = vmbus_teardown_gpadl(device->channel,
net_device->send_buf_gpadl_handle);
&net_device->send_buf_gpadl_handle);

/* If we failed here, we might as well return and have a leak
* rather than continue and a bugchk
@ -312,7 +324,6 @@ static void netvsc_teardown_send_gpadl(struct hv_device *device,
"unable to teardown send buffer's gpadl\n");
return;
}
net_device->send_buf_gpadl_handle = 0;
}
}
@ -338,8 +349,8 @@ static int netvsc_init_buf(struct hv_device *device,
struct net_device *ndev = hv_get_drvdata(device);
struct nvsp_message *init_packet;
unsigned int buf_size;
size_t map_words;
int i, ret = 0;
void *vaddr;

/* Get receive buffer area. */
buf_size = device_info->recv_sections * device_info->recv_section_size;
@ -375,12 +386,23 @@ static int netvsc_init_buf(struct hv_device *device,
goto cleanup;
}

if (hv_isolation_type_snp()) {
vaddr = hv_map_memory(net_device->recv_buf, buf_size);
if (!vaddr) {
ret = -ENOMEM;
goto cleanup;
}

net_device->recv_original_buf = net_device->recv_buf;
net_device->recv_buf = vaddr;
}

/* Notify the NetVsp of the gpadl handle */
init_packet = &net_device->channel_init_pkt;
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
init_packet->msg.v1_msg.send_recv_buf.
gpadl_handle = net_device->recv_buf_gpadl_handle;
gpadl_handle = net_device->recv_buf_gpadl_handle.gpadl_handle;
init_packet->msg.v1_msg.
send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

@ -463,6 +485,7 @@ static int netvsc_init_buf(struct hv_device *device,
ret = -ENOMEM;
goto cleanup;
}
net_device->send_buf_size = buf_size;

/* Establish the gpadl handle for this buffer on this
* channel. Note: This call uses the vmbus connection rather
@ -477,12 +500,23 @@ static int netvsc_init_buf(struct hv_device *device,
goto cleanup;
}

if (hv_isolation_type_snp()) {
vaddr = hv_map_memory(net_device->send_buf, buf_size);
if (!vaddr) {
ret = -ENOMEM;
goto cleanup;
}

net_device->send_original_buf = net_device->send_buf;
net_device->send_buf = vaddr;
}

/* Notify the NetVsp of the gpadl handle */
init_packet = &net_device->channel_init_pkt;
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
net_device->send_buf_gpadl_handle;
net_device->send_buf_gpadl_handle.gpadl_handle;
init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

trace_nvsp_send(ndev, init_packet);
@ -529,10 +563,9 @@ static int netvsc_init_buf(struct hv_device *device,
net_device->send_section_size, net_device->send_section_cnt);

/* Setup state for managing the send buffer. */
map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);

net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
if (net_device->send_section_map == NULL) {
net_device->send_section_map = bitmap_zalloc(net_device->send_section_cnt,
GFP_KERNEL);
if (!net_device->send_section_map) {
ret = -ENOMEM;
goto cleanup;
}
@ -767,7 +800,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,

/* Notify the layer above us */
if (likely(skb)) {
const struct hv_netvsc_packet *packet
struct hv_netvsc_packet *packet
= (struct hv_netvsc_packet *)skb->cb;
u32 send_index = packet->send_buf_index;
struct netvsc_stats *tx_stats;
@ -783,6 +816,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
tx_stats->bytes += packet->total_bytes;
u64_stats_update_end(&tx_stats->syncp);

netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
napi_consume_skb(skb, budget);
}

@ -947,6 +981,88 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
memset(dest, 0, padding);
}

void netvsc_dma_unmap(struct hv_device *hv_dev,
struct hv_netvsc_packet *packet)
{
u32 page_count = packet->cp_partial ?
packet->page_buf_cnt - packet->rmsg_pgcnt :
packet->page_buf_cnt;
int i;

if (!hv_is_isolation_supported())
return;

if (!packet->dma_range)
return;

for (i = 0; i < page_count; i++)
dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma,
packet->dma_range[i].mapping_size,
DMA_TO_DEVICE);

kfree(packet->dma_range);
}

/* netvsc_dma_map - Map swiotlb bounce buffer with data page of
* packet sent by vmbus_sendpacket_pagebuffer() in the Isolation
* VM.
*
* In isolation VM, netvsc send buffer has been marked visible to
* host and so the data copied to send buffer doesn't need to use
* bounce buffer. The data pages handled by vmbus_sendpacket_pagebuffer()
* may not be copied to send buffer and so these pages need to be
* mapped with swiotlb bounce buffer. netvsc_dma_map() is to do
* that. The pfns in the struct hv_page_buffer need to be converted
* to bounce buffer's pfn. The loop here is necessary because the
* entries in the page buffer array are not necessarily full
* pages of data. Each entry in the array has a separate offset and
* len that may be non-zero, even for entries in the middle of the
* array. And the entries are not physically contiguous. So each
* entry must be individually mapped rather than as a contiguous unit.
* So not use dma_map_sg() here.
*/
static int netvsc_dma_map(struct hv_device *hv_dev,
struct hv_netvsc_packet *packet,
struct hv_page_buffer *pb)
{
u32 page_count = packet->cp_partial ?
packet->page_buf_cnt - packet->rmsg_pgcnt :
packet->page_buf_cnt;
dma_addr_t dma;
int i;

if (!hv_is_isolation_supported())
return 0;

packet->dma_range = kcalloc(page_count,
sizeof(*packet->dma_range),
GFP_KERNEL);
if (!packet->dma_range)
return -ENOMEM;

for (i = 0; i < page_count; i++) {
char *src = phys_to_virt((pb[i].pfn << HV_HYP_PAGE_SHIFT)
+ pb[i].offset);
u32 len = pb[i].len;

dma = dma_map_single(&hv_dev->device, src, len,
DMA_TO_DEVICE);
if (dma_mapping_error(&hv_dev->device, dma)) {
kfree(packet->dma_range);
return -ENOMEM;
}

/* pb[].offset and pb[].len are not changed during dma mapping
* and so not reassign.
*/
packet->dma_range[i].dma = dma;
packet->dma_range[i].mapping_size = len;
pb[i].pfn = dma >> HV_HYP_PAGE_SHIFT;
}

return 0;
}

static inline int netvsc_send_pkt(
struct hv_device *device,
struct hv_netvsc_packet *packet,
@ -987,14 +1103,24 @@ static inline int netvsc_send_pkt(

trace_nvsp_send_pkt(ndev, out_channel, rpkt);

packet->dma_range = NULL;
if (packet->page_buf_cnt) {
if (packet->cp_partial)
pb += packet->rmsg_pgcnt;

ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
if (ret) {
ret = -EAGAIN;
goto exit;
}

ret = vmbus_sendpacket_pagebuffer(out_channel,
pb, packet->page_buf_cnt,
&nvmsg, sizeof(nvmsg),
req_id);

if (ret)
netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
} else {
ret = vmbus_sendpacket(out_channel,
&nvmsg, sizeof(nvmsg),
@ -1002,6 +1128,7 @@ static inline int netvsc_send_pkt(
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}

exit:
if (ret == 0) {
atomic_inc_return(&nvchan->queue_sends);
@ -68,7 +68,7 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
break;

default:
bpf_warn_invalid_xdp_action(act);
bpf_warn_invalid_xdp_action(ndev, prog, act);
}

out:
@ -803,6 +803,7 @@ void netvsc_linkstatus_callback(struct net_device *net,
schedule_delayed_work(&ndev_ctx->dwork, 0);
}

/* This function should only be called after skb_record_rx_queue() */
static void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
{
int rc;
@ -1586,6 +1587,9 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
pcpu_sum = kvmalloc_array(num_possible_cpus(),
sizeof(struct netvsc_ethtool_pcpu_stats),
GFP_KERNEL);
if (!pcpu_sum)
return;

netvsc_get_pcpu_stats(dev, pcpu_sum);
for_each_present_cpu(cpu) {
struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];
@ -1857,7 +1861,9 @@ static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
}

static void netvsc_get_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ring)
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
struct net_device_context *ndevctx = netdev_priv(ndev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
@ -1869,7 +1875,9 @@ static void netvsc_get_ringparam(struct net_device *ndev,
}

static int netvsc_set_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ring)
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
struct net_device_context *ndevctx = netdev_priv(ndev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
@ -2511,6 +2519,7 @@ static int netvsc_probe(struct hv_device *dev,
net->netdev_ops = &device_ops;
net->ethtool_ops = &ethtool_ops;
SET_NETDEV_DEV(net, &dev->device);
dma_set_min_align_mask(&dev->device, HV_HYP_PAGE_SIZE - 1);

/* We always need headroom for rndis header */
net->needed_headroom = RNDIS_AND_PPI_SIZE;
@ -2536,7 +2545,7 @@ static int netvsc_probe(struct hv_device *dev,
goto rndis_failed;
}

memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);
eth_hw_addr_set(net, device_info->mac_adr);

/* We must get rtnl lock before scheduling nvdev->subchan_work,
* otherwise netvsc_subchan_work() can get rtnl lock first and wait
@ -2742,8 +2751,7 @@ static int netvsc_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;

/* Avoid Bonding master dev with same MAC registering as VF */
if ((event_dev->priv_flags & IFF_BONDING) &&
(event_dev->flags & IFF_MASTER))
if (netif_is_bond_master(event_dev))
return NOTIFY_DONE;

switch (event) {
@ -361,6 +361,8 @@ static void rndis_filter_receive_response(struct net_device *ndev,
}
}

netvsc_dma_unmap(((struct net_device_context *)
netdev_priv(ndev))->device_ctx, &request->pkt);
complete(&request->wait_event);
} else {
netdev_err(ndev,
@ -4,6 +4,7 @@ config QCOM_IPA
depends on ARCH_QCOM || COMPILE_TEST
depends on INTERCONNECT
depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
select QCOM_MDT_LOADER if ARCH_QCOM
select QCOM_SCM
select QCOM_QMI_HELPERS
@ -93,6 +93,7 @@

#define GSI_CHANNEL_STOP_RETRIES 10
#define GSI_CHANNEL_MODEM_HALT_RETRIES 10
#define GSI_CHANNEL_MODEM_FLOW_RETRIES 5 /* disable flow control only */

#define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END 16 /* Last reserved event id */
@ -339,10 +340,10 @@ static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
* completion to be signaled. Returns true if the command completes
* or false if it times out.
*/
static bool
gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
static bool gsi_command(struct gsi *gsi, u32 reg, u32 val)
{
unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
struct completion *completion = &gsi->completion;

reinit_completion(completion);

@ -366,8 +367,6 @@
static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
enum gsi_evt_cmd_opcode opcode)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct completion *completion = &evt_ring->completion;
struct device *dev = gsi->dev;
bool timeout;
u32 val;
@ -378,7 +377,7 @@ static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);

timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);
timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val);

gsi_irq_ev_ctrl_disable(gsi);

@ -478,7 +477,6 @@ static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
static void
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
struct completion *completion = &channel->completion;
u32 channel_id = gsi_channel_id(channel);
struct gsi *gsi = channel->gsi;
struct device *dev = gsi->dev;
@ -490,7 +488,7 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)

val = u32_encode_bits(channel_id, CH_CHID_FMASK);
val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);
timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val);

gsi_irq_ch_ctrl_disable(gsi);
@ -1074,13 +1072,10 @@ static void gsi_isr_chan_ctrl(struct gsi *gsi)

while (channel_mask) {
u32 channel_id = __ffs(channel_mask);
struct gsi_channel *channel;

channel_mask ^= BIT(channel_id);

channel = &gsi->channel[channel_id];

complete(&channel->completion);
complete(&gsi->completion);
}
}

@ -1094,13 +1089,10 @@ static void gsi_isr_evt_ctrl(struct gsi *gsi)

while (event_mask) {
u32 evt_ring_id = __ffs(event_mask);
struct gsi_evt_ring *evt_ring;

event_mask ^= BIT(evt_ring_id);

evt_ring = &gsi->evt_ring[evt_ring_id];

complete(&evt_ring->completion);
complete(&gsi->completion);
}
}

@ -1110,7 +1102,7 @@ gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
if (code == GSI_OUT_OF_RESOURCES) {
dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
complete(&gsi->channel[channel_id].completion);
complete(&gsi->completion);
return;
}

@ -1127,7 +1119,7 @@ gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
u32 channel_id = gsi_channel_id(evt_ring->channel);

complete(&evt_ring->completion);
complete(&gsi->completion);
dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
channel_id);
return;
@ -1171,18 +1163,23 @@ static void gsi_isr_gp_int1(struct gsi *gsi)
u32 result;
u32 val;

/* This interrupt is used to handle completions of the two GENERIC
* GSI commands. We use these to allocate and halt channels on
* the modem's behalf due to a hardware quirk on IPA v4.2. Once
* allocated, the modem "owns" these channels, and as a result we
* have no way of knowing the channel's state at any given time.
/* This interrupt is used to handle completions of GENERIC GSI
* commands. We use these to allocate and halt channels on the
* modem's behalf due to a hardware quirk on IPA v4.2. The modem
* "owns" channels even when the AP allocates them, and have no
* way of knowing whether a modem channel's state has been changed.
*
* We also use GENERIC commands to enable/disable channel flow
* control for IPA v4.2+.
*
* It is recommended that we halt the modem channels we allocated
* when shutting down, but it's possible the channel isn't running
* at the time we issue the HALT command. We'll get an error in
* that case, but it's harmless (the channel is already halted).
* Similarly, we could get an error back when updating flow control
* on a channel because it's not in the proper state.
*
* For this reason, we silently ignore a CHANNEL_NOT_RUNNING error
* In either case, we silently ignore a CHANNEL_NOT_RUNNING error
* if we receive it.
*/
val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
@ -1648,19 +1645,25 @@ static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
}
/* We use generic commands only to operate on modem channels. We don't have
* the ability to determine channel state for a modem channel, so we simply
* issue the command and wait for it to complete.
*/
static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
enum gsi_generic_cmd_opcode opcode)
enum gsi_generic_cmd_opcode opcode,
u8 params)
{
struct completion *completion = &gsi->completion;
bool timeout;
u32 val;

/* The error global interrupt type is always enabled (until we
* teardown), so we won't change that. A generic EE command
* completes with a GSI global interrupt of type GP_INT1. We
* only perform one generic command at a time (to allocate or
* halt a modem channel) and only from this function. So we
* enable the GP_INT1 IRQ type here while we're expecting it.
/* The error global interrupt type is always enabled (until we tear
* down), so we will keep it enabled.
*
* A generic EE command completes with a GSI global interrupt of
* type GP_INT1. We only perform one generic command at a time
* (to allocate, halt, or enable/disable flow control on a modem
* channel), and only from this function. So we enable the GP_INT1
* IRQ type here, and disable it again after the command completes.
*/
val = BIT(ERROR_INT) | BIT(GP_INT1);
iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
@ -1674,8 +1677,9 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
val |= u32_encode_bits(params, GENERIC_PARAMS_FMASK);

timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val);

/* Disable the GP_INT1 IRQ type again */
iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
@ -1692,7 +1696,7 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
{
return gsi_generic_command(gsi, channel_id,
GSI_GENERIC_ALLOCATE_CHANNEL);
GSI_GENERIC_ALLOCATE_CHANNEL, 0);
}

static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
@ -1702,7 +1706,7 @@ static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)

do
ret = gsi_generic_command(gsi, channel_id,
GSI_GENERIC_HALT_CHANNEL);
GSI_GENERIC_HALT_CHANNEL, 0);
while (ret == -EAGAIN && retries--);

if (ret)
@ -1710,6 +1714,32 @@ static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
ret, channel_id);
}

/* Enable or disable flow control for a modem GSI TX channel (IPA v4.2+) */
void
gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id, bool enable)
{
u32 retries = 0;
u32 command;
int ret;

command = enable ? GSI_GENERIC_ENABLE_FLOW_CONTROL
: GSI_GENERIC_DISABLE_FLOW_CONTROL;
/* Disabling flow control on IPA v4.11+ can return -EAGAIN if enable
* is underway. In this case we need to retry the command.
*/
if (!enable && gsi->version >= IPA_VERSION_4_11)
retries = GSI_CHANNEL_MODEM_FLOW_RETRIES;

do
ret = gsi_generic_command(gsi, channel_id, command, 0);
while (ret == -EAGAIN && retries--);

if (ret)
dev_err(gsi->dev,
"error %d %sabling mode channel %u flow control\n",
ret, enable ? "en" : "dis", channel_id);
}

/* Setup function for channels */
static int gsi_channel_setup(struct gsi *gsi)
{
@ -1975,18 +2005,6 @@ static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
gsi_evt_ring_id_free(gsi, evt_ring_id);
}

/* Init function for event rings; there is no gsi_evt_ring_exit() */
static void gsi_evt_ring_init(struct gsi *gsi)
{
u32 evt_ring_id = 0;

gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
gsi->ieob_enabled_bitmap = 0;
do
init_completion(&gsi->evt_ring[evt_ring_id].completion);
while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
}

static bool gsi_channel_data_valid(struct gsi *gsi,
const struct ipa_gsi_endpoint_data *data)
{
@ -2069,7 +2087,6 @@ static int gsi_channel_init_one(struct gsi *gsi,
channel->tlv_count = data->channel.tlv_count;
channel->tre_count = tre_count;
channel->event_count = data->channel.event_count;
init_completion(&channel->completion);

ret = gsi_channel_evt_ring_init(channel);
if (ret)
@ -2129,7 +2146,8 @@ static int gsi_channel_init(struct gsi *gsi, u32 count,
/* IPA v4.2 requires the AP to allocate channels for the modem */
modem_alloc = gsi->version == IPA_VERSION_4_2;

gsi_evt_ring_init(gsi); /* No matching exit required */
gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
gsi->ieob_enabled_bitmap = 0;

/* The endpoint data array is indexed by endpoint name */
for (i = 0; i < count; i++) {
@ -101,6 +101,7 @@ enum gsi_channel_state {
GSI_CHANNEL_STATE_STARTED = 0x2,
GSI_CHANNEL_STATE_STOPPED = 0x3,
GSI_CHANNEL_STATE_STOP_IN_PROC = 0x4,
GSI_CHANNEL_STATE_FLOW_CONTROLLED = 0x5, /* IPA v4.2-v4.9 */
GSI_CHANNEL_STATE_ERROR = 0xf,
};

@ -114,8 +115,6 @@ struct gsi_channel {
u16 tre_count;
u16 event_count;

struct completion completion; /* signals channel command completion */

struct gsi_ring tre_ring;
u32 evt_ring_id;

@ -141,28 +140,27 @@ enum gsi_evt_ring_state {

struct gsi_evt_ring {
struct gsi_channel *channel;
struct completion completion; /* signals event ring state changes */
struct gsi_ring ring;
};

struct gsi {
struct device *dev; /* Same as IPA device */
enum ipa_version version;
struct net_device dummy_dev; /* needed for NAPI */
void __iomem *virt_raw; /* I/O mapped address range */
void __iomem *virt; /* Adjusted for most registers */
u32 irq;
u32 channel_count;
u32 evt_ring_count;
struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
u32 event_bitmap; /* allocated event rings */
u32 modem_channel_bitmap; /* modem channels to allocate */
u32 type_enabled_bitmap; /* GSI IRQ types enabled */
u32 ieob_enabled_bitmap; /* IEOB IRQ enabled (event rings) */
struct completion completion; /* for global EE commands */
int result; /* Negative errno (generic commands) */
struct completion completion; /* Signals GSI command completion */
struct mutex mutex; /* protects commands, programming */
struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
struct net_device dummy_dev; /* needed for NAPI */
};

/**
@ -218,6 +216,15 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id);
*/
int gsi_channel_stop(struct gsi *gsi, u32 channel_id);

/**
* gsi_modem_channel_flow_control() - Set channel flow control state (IPA v4.2+)
* @gsi: GSI pointer returned by gsi_setup()
* @channel_id: Modem TX channel to control
* @enable: Whether to enable flow control (i.e., prevent flow)
*/
void gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id,
bool enable);

/**
* gsi_channel_reset() - Reset an allocated GSI channel
* @gsi: GSI pointer
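The new gsi_modem_channel_flow_control() declared above is intended to be used in pairs. A minimal sketch of a hypothetical caller (channel_id and the surrounding function are illustrative only; only the declared API is assumed):

static void example_pause_modem_tx(struct gsi *gsi, u32 channel_id)
{
	/* Stop the modem from moving further data on this TX channel... */
	gsi_modem_channel_flow_control(gsi, channel_id, true);

	/* ...reconfigure the endpoint here... */

	/* ...then let traffic flow again. */
	gsi_modem_channel_flow_control(gsi, channel_id, false);
}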
@ -313,11 +313,15 @@ enum gsi_evt_cmd_opcode {
#define GENERIC_OPCODE_FMASK GENMASK(4, 0)
#define GENERIC_CHID_FMASK GENMASK(9, 5)
#define GENERIC_EE_FMASK GENMASK(13, 10)
#define GENERIC_PARAMS_FMASK GENMASK(31, 24) /* IPA v4.11+ */

/** enum gsi_generic_cmd_opcode - GENERIC_OPCODE field values in GENERIC_CMD */
enum gsi_generic_cmd_opcode {
GSI_GENERIC_HALT_CHANNEL = 0x1,
GSI_GENERIC_ALLOCATE_CHANNEL = 0x2,
GSI_GENERIC_ENABLE_FLOW_CONTROL = 0x3, /* IPA v4.2+ */
GSI_GENERIC_DISABLE_FLOW_CONTROL = 0x4, /* IPA v4.2+ */
GSI_GENERIC_QUERY_FLOW_CONTROL = 0x5, /* IPA v4.11+ */
};

/* The next register is present for IPA v3.5.1 and above */