// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
 * ice tracepoint functions. This must be done exactly once across the
 * ice driver.
 */
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"

static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */
static DEFINE_IDA(ice_aux_ida);
static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
static int ice_vsi_open(struct ice_vsi *vsi);

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

bool netif_is_ice(struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ice_netdev_ops);
}

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;
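	/* if next_to_use has wrapped around the end of the ring (tail < head),
	 * the outstanding descriptors span the wrap point
	 */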
	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}
/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	for (i = 0; i < vsi->num_txq; i++) {
		struct ice_ring *tx_ring = vsi->tx_rings[i];

		if (tx_ring && tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.pkts & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}
/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, netdevice will be
 * unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	enum ice_status status;
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
	if (status)
		return -EIO;

	return 0;
}
/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}
/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
}
/**
 * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 * @set_promisc: enable or disable promisc flag request
 *
 */
static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
{
	struct ice_hw *hw = &vsi->back->hw;
	enum ice_status status = 0;

	if (vsi->type != ICE_VSI_PF)
		return 0;
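	/* when the VSI has VLANs beyond the default, the promiscuous setting
	 * is applied per VLAN via the VLAN-aware variant below
	 */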
	if (vsi->num_vlan > 1) {
		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
						  set_promisc);
	} else {
		if (set_promisc)
			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
						     0);
		else
			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						       0);
	}

	if (status)
		return -EIO;

	return 0;
}
/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status = 0;
	u32 changed_flags = 0;
	u8 promisc_m;
	int err = 0;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (status) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (status == ICE_ERR_NO_MEMORY) {
			err = -ENOMEM;
			goto out;
		}
	}

	/* Add MAC addresses in the sync list */
	status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If filter is added successfully or already exists, do not go into
	 * 'if' condition and report it as error. Instead continue processing
	 * rest of the function.
	 */
	if (status && status != ICE_ERR_ALREADY_EXISTS) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			err = -EIO;
			goto out;
		}
	}
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			if (vsi->num_vlan > 1)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_cfg_promisc(vsi, promisc_m, true);
			if (err) {
				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			if (vsi->num_vlan > 1)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_cfg_promisc(vsi, promisc_m, false);
			if (err) {
				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
				err = ice_set_dflt_vsi(pf->first_sw, vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				ice_cfg_vlan_pruning(vsi, false, false);
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
				err = ice_clear_dflt_vsi(pf->first_sw);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->num_vlan > 1)
					ice_cfg_vlan_pruning(vsi, true, false);
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}
/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int node;
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);

	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
		pf->pf_agg_node[node].num_vsis = 0;

	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
		pf->vf_agg_node[node].num_vsis = 0;
}
/**
 * ice_prepare_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int i;

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	ice_for_each_vf(pf, i)
		ice_set_vf_state_qs_dis(&pf->vf[i]);

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_release(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called.
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	ice_prepare_for_reset(pf);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf, true);
	}
}
/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
	 * prepare for pending reset if not already (for PF software-initiated
	 * global resets the software should already be prepared for it as
	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
	 * by firmware or software on other PFs, that bit is not set so prepare
	 * for the reset now), poll for reset done, rebuild and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf, true);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state))
		reset_type = ICE_RESET_PFR;
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}
/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel(R) Ethernet Port Configuration Tool to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	enum ice_status status;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;
	const char *an;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed, might be false due to not supported */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		an_advertised = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	kfree(caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
		    speed, fec_req, fec, an_advertised, an, fc);
	ice_print_topo_conflict(vsi);
}

/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (link_up == netif_carrier_ok(vsi->netdev))
			return;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}
/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore an error state in this function should be treated with a DBG
 * message and continue on with driver rebuild/reenable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct ice_hw *hw = &pf->hw;
	u32 ouisubtype;

	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
			__func__);
		return;
	}
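	/* the default MIB is built as three consecutive TLVs (ETS CFG,
	 * ETS REC and PFC CFG); offset accumulates the total LLDPDU length
	 * handed to firmware below
	 */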
	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
	 * Octets 13 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add ETS REC TLV */
	buf = tlv->tlvinfo;
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
	 * Octets 13 - 20 are TSA value - leave as zeros
	 */
	buf[5] = 0x64;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add PFC CFG TLV */
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_PFC_TLV_LEN);
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_PFC_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	/* Octet 1 left as all zeros - PFC disabled */
	buf[0] = 0x08;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;

	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

	kfree(lldpmib);
}
/**
 * ice_check_module_power
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check module power level returned by a previous call to aq_get_link_info
 * and print error messages if module power level is not supported
 */
static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
{
	/* if module power level is supported, clear the flag */
	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
		return;
	}

	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
	 * above block didn't clear this bit, there's nothing to do
	 */
	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	}
}
/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	enum ice_status status;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events,
	 * don't bail on failure due to other book keeping needed
	 */
	status = ice_update_link_info(pi);
	if (status)
		dev_dbg(dev, "Failed to update link status on port %d, err %s aq_err %s\n",
			pi->lport, ice_stat_str(status),
			ice_aq_str(pi->hw->adminq.sq_last_status));

	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return 0;

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);
	ice_vc_notify_link_state(pf);

	return 0;
}
/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(ICE_DOWN, pf->state) ||
	    test_bit(ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}

enum ice_aq_task_state {
	ICE_AQ_TASK_WAITING = 0,
	ICE_AQ_TASK_COMPLETE,
	ICE_AQ_TASK_CANCELED,
};

struct ice_aq_task {
	struct hlist_node entry;

	u16 opcode;
	struct ice_rq_event_info *event;
	enum ice_aq_task_state state;
};
/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @opcode: the opcode to wait for
 * @timeout: how long to wait, in jiffies
 * @event: storage for the event info
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * To obtain only the descriptor contents, pass an event without an allocated
 * msg_buf. If the complete data buffer is desired, allocate the
 * event->msg_buf with enough space ahead of time.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
			  struct ice_rq_event_info *event)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_aq_task *task;
	unsigned long start;
	long ret;
	int err;

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->event = event;
	task->state = ICE_AQ_TASK_WAITING;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);

	start = jiffies;

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
					       timeout);
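	/* ret < 0 means the wait was interrupted by a signal, ret == 0 means
	 * it timed out with task->state still ICE_AQ_TASK_WAITING, and
	 * ret > 0 means the task state changed before the timeout expired
	 */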
	switch (task->state) {
	case ICE_AQ_TASK_WAITING:
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		opcode);

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);
	kfree(task);

	return err;
}
/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		if (task->state || task->opcode != opcode)
			continue;

		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
		task->event->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was set and it
		 * is large enough to hold the received message
		 */
		if (task->event->msg_buf &&
		    task->event->buf_len > event->buf_len) {
			memcpy(task->event->msg_buf, event->msg_buf,
			       event->buf_len);
			task->event->buf_len = event->buf_len;
		}

		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	if (found)
		wake_up(&pf->aq_wait_queue);
}
/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
{
	struct ice_aq_task *task;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
		task->state = ICE_AQ_TASK_CANCELED;
	spin_unlock_bh(&pf->aq_wait_lock);

	wake_up(&pf->aq_wait_queue);
}
/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		qtype = "Sideband";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		/* we are going to try to detect a malicious VF, so set the
		 * state to begin detection
		 */
		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		enum ice_status ret;
		u16 opcode;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == ICE_ERR_AQ_NO_WORK)
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %s\n", qtype,
				ice_stat_str(ret));
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		/* Notify any thread that might be waiting for this event */
		ice_aq_check_events(pf, opcode, &event);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			if (!ice_is_malicious_vf(pf, &event, i, pending))
				ice_vc_process_vf_msg(pf, &event);
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);
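	/* return non-zero only when the work budget was exhausted with
	 * messages still pending, so the caller knows to process more
	 */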
	return pending && (i == ICE_DFLT_IRQ_WORK);
}
/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}

/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}
/**
 * ice_clean_sbq_subtask - clean the Sideband Queue rings
 * @pf: board private structure
 */
static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* Nothing to do here if sideband queue is not supported */
	if (!ice_is_sbq_supported(hw)) {
		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
		return;
	}

	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
		return;

	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->sbq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);

	ice_flush(hw);
}
/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 *
 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
 * 1 otherwise.
 */
static int ice_service_task_stop(struct ice_pf *pf)
{
	int ret;

	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(ICE_SERVICE_SCHED, pf->state);
	return ret;
}

/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for suspend and resume works (e.g WoL scenario)
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
	clear_bit(ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}
/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event.
 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
 * disable the queue, the PF can be configured to reset the VF using ethtool
 * private flag mdd-auto-reset-vf.
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	unsigned int i;
	u32 reg;

	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
		/* Since the VF MDD event logging is rate limited, check if
		 * there are pending MDD events.
		 */
		ice_print_vfs_mdd_events(pf);
		return;
	}

	/* find what triggered an MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN);
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
				GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
				GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
				GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
				GL_MDET_RX_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
				GL_MDET_RX_QNUM_S);

		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_RX, 0xffffffff);
	}

	/* check to see if this PF caused an MDD event */
	reg = rd32(hw, PF_MDET_TX_PQM);
	if (reg & PF_MDET_TX_PQM_VALID_M) {
		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_TX_TCLAN);
	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_RX);
	if (reg & PF_MDET_RX_VALID_M) {
		wr32(hw, PF_MDET_RX, 0xFFFF);
		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
	}

	/* Check to see if one of the VFs caused an MDD event, and then
	 * increment counters and set print pending
	 */
	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		reg = rd32(hw, VP_MDET_TX_PQM(i));
		if (reg & VP_MDET_TX_PQM_VALID_M) {
			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
					 i);
		}

		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
					 i);
		}

		reg = rd32(hw, VP_MDET_TX_TDPU(i));
		if (reg & VP_MDET_TX_TDPU_VALID_M) {
			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
					 i);
		}

		reg = rd32(hw, VP_MDET_RX(i));
		if (reg & VP_MDET_RX_VALID_M) {
			wr32(hw, VP_MDET_RX(i), 0xFFFF);
			vf->mdd_rx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_rx_err(pf))
				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
					 i);

			/* Since the queue is disabled on VF Rx MDD events, the
			 * PF can be configured to reset the VF through ethtool
			 * private flag mdd-auto-reset-vf.
			 */
			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
				/* VF MDD event counters will be cleared by
				 * reset, so print the event prior to reset.
				 */
				ice_print_vf_rx_mdd_event(vf);
				ice_reset_vf(&pf->vf[i], false);
			}
		}
	}

	ice_print_vfs_mdd_events(pf);
}
/**
 * ice_force_phys_link_state - Force the physical link state
 * @vsi: VSI to force the physical link state to up/down
 * @link_up: true/false indicates to set the physical link to up/down
 *
 * Force the physical link state by getting the current PHY capabilities from
 * hardware and setting the PHY config based on the determined capabilities. If
 * link changes a link event will be triggered because both the Enable Automatic
 * Link Update and LESM Enable bits are set when setting the PHY capabilities.
 *
 * Returns 0 on success, negative on failure
 */
static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_port_info *pi;
	struct device *dev;
	int retcode;

	if (!vsi || !vsi->port_info || !vsi->back)
		return -EINVAL;
	if (vsi->type != ICE_VSI_PF)
		return 0;

	dev = ice_pf_to_dev(vsi->back);

	pi = vsi->port_info;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
				      NULL);
	if (retcode) {
		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
			vsi->vsi_num, retcode);
		retcode = -EIO;
		goto out;
	}

	/* No change in link */
	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
		goto out;

	/* Use the current user PHY configuration. The current user PHY
	 * configuration is initialized during probe from PHY capabilities
	 * software mode, and updated on set PHY configuration.
	 */
	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
	if (!cfg) {
		retcode = -ENOMEM;
		goto out;
	}

	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
	if (link_up)
		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
	else
		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;

	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
	if (retcode) {
		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
			vsi->vsi_num, retcode);
		retcode = -EIO;
	}

	kfree(cfg);
out:
	kfree(pcaps);
	return retcode;
}
/**
 * ice_init_nvm_phy_type - Initialize the NVM PHY type
 * @pi: port info structure
 *
 * Initialize nvm_phy_type_[low|high] for link lenient mode support
 */
static int ice_init_nvm_phy_type(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_pf *pf = pi->hw->back;
	enum ice_status status;
	int err = 0;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps,
				     NULL);

	if (status) {
		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
		err = -EIO;
		goto out;
	}

	pf->nvm_phy_type_hi = pcaps->phy_type_high;
	pf->nvm_phy_type_lo = pcaps->phy_type_low;

out:
	kfree(pcaps);
	return err;
}

/**
 * ice_init_link_dflt_override - Initialize link default override
 * @pi: port info structure
 *
 * Initialize link default override and PHY total port shutdown during probe
 */
static void ice_init_link_dflt_override(struct ice_port_info *pi)
{
	struct ice_link_default_override_tlv *ldo;
	struct ice_pf *pf = pi->hw->back;

	ldo = &pf->link_dflt_override;
	if (ice_get_link_default_override(ldo, pi))
		return;

	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
		return;

	/* Enable Total Port Shutdown (override/replace link-down-on-close
	 * ethtool private flag) for ports with Port Disable bit set.
	 */
	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
}
/**
* ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
* @ pi : port info structure
*
 * If default override is enabled, initialize the user PHY cfg speed and FEC
 * settings using the default override mask from the NVM.
 *
 * The PHY should only be configured with the default override settings the
 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
 * is used to indicate that the user PHY cfg default override is initialized
 * and the PHY has not been configured with the default override settings. The
 * state is set here, and cleared in ice_configure_phy the first time the PHY
 * is configured.
 *
 * This function should be called only if the FW doesn't support default
 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
*/
static void ice_init_phy_cfg_dflt_override ( struct ice_port_info * pi )
{
struct ice_link_default_override_tlv * ldo ;
struct ice_aqc_set_phy_cfg_data * cfg ;
struct ice_phy_info * phy = & pi - > phy ;
struct ice_pf * pf = pi - > hw - > back ;
ldo = & pf - > link_dflt_override ;
	/* If link default override is enabled, use it to mask NVM PHY
	 * capabilities for speed and FEC default configuration.
	 */
cfg = & phy - > curr_user_phy_cfg ;
if ( ldo - > phy_type_low | | ldo - > phy_type_high ) {
cfg - > phy_type_low = pf - > nvm_phy_type_lo &
cpu_to_le64 ( ldo - > phy_type_low ) ;
cfg - > phy_type_high = pf - > nvm_phy_type_hi &
cpu_to_le64 ( ldo - > phy_type_high ) ;
}
cfg - > link_fec_opt = ldo - > fec_options ;
phy - > curr_user_fec_req = ICE_FEC_AUTO ;
	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
}
/**
* ice_init_phy_user_cfg - Initialize the PHY user configuration
* @ pi : port info structure
*
* Initialize the current user PHY configuration , speed , FEC , and FC requested
* mode to default . The PHY defaults are from get PHY capabilities topology
* with media so call when media is first available . An error is returned if
* called when media is not available . The PHY initialization completed state is
* set here .
*
* These configurations are used when setting PHY
* configuration . The user PHY configuration is updated on set PHY
* configuration . Returns 0 on success , negative on failure
*/
static int ice_init_phy_user_cfg ( struct ice_port_info * pi )
{
struct ice_aqc_get_phy_caps_data * pcaps ;
struct ice_phy_info * phy = & pi - > phy ;
struct ice_pf * pf = pi - > hw - > back ;
enum ice_status status ;
int err = 0 ;
if ( ! ( phy - > link_info . link_info & ICE_AQ_MEDIA_AVAILABLE ) )
return - EIO ;
pcaps = kzalloc ( sizeof ( * pcaps ) , GFP_KERNEL ) ;
if ( ! pcaps )
return - ENOMEM ;
	if (ice_fw_supports_report_dflt_cfg(pi->hw))
		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
					     pcaps, NULL);
	else
		status = ice_aq_get_phy_caps(pi, false,
					     ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					     pcaps, NULL);
if ( status ) {
dev_err ( ice_pf_to_dev ( pf ) , " Get PHY capability failed. \n " ) ;
err = - EIO ;
goto err_out ;
}
ice_copy_phy_caps_to_cfg ( pi , pcaps , & pi - > phy . curr_user_phy_cfg ) ;
/* check if lenient mode is supported and enabled */
	if (ice_fw_supports_link_override(pi->hw) &&
	    !(pcaps->module_compliance_enforcement &
	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);

		/* if the FW supports default PHY configuration mode, then the
		 * driver does not have to apply link override settings. If
		 * not, initialize user PHY configuration with link override
		 * values
		 */
		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
			ice_init_phy_cfg_dflt_override(pi);
			goto out;
		}
	}

	/* if link default override is not enabled, set user flow control and
	 * FEC settings based on what get_phy_caps returned
	 */
phy - > curr_user_fec_req = ice_caps_to_fec_mode ( pcaps - > caps ,
pcaps - > link_fec_options ) ;
phy - > curr_user_fc_req = ice_caps_to_fc_mode ( pcaps - > caps ) ;
out :
phy - > curr_user_speed_req = ICE_AQ_LINK_SPEED_M ;
	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
err_out :
kfree ( pcaps ) ;
return err ;
}
/**
* ice_configure_phy - configure PHY
* @ vsi : VSI of PHY
*
 * Set the PHY configuration. If the current PHY configuration is the same as
 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
 * configure the PHY based on get PHY capabilities for topology with media.
*/
static int ice_configure_phy ( struct ice_vsi * vsi )
{
struct device * dev = ice_pf_to_dev ( vsi - > back ) ;
	struct ice_port_info *pi = vsi->port_info;
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_phy_info *phy = &pi->phy;
	struct ice_pf *pf = vsi->back;
	enum ice_status status;
	int err = 0;

	/* Ensure we have media as we cannot configure a medialess port */
	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
		return -EPERM;

	ice_print_topo_conflict(vsi);

	if (phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
		return -EPERM;

	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
		return ice_force_phys_link_state(vsi, true);
pcaps = kzalloc ( sizeof ( * pcaps ) , GFP_KERNEL ) ;
if ( ! pcaps )
return - ENOMEM ;
/* Get current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
if ( status ) {
dev_err ( dev , " Failed to get PHY configuration, VSI %d error %s \n " ,
vsi - > vsi_num , ice_stat_str ( status ) ) ;
err = - EIO ;
goto done ;
}
/* If PHY enable link is configured and configuration has not changed,
* there ' s nothing to do
*/
	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
goto done ;
/* Use PHY topology as baseline for configuration */
memset ( pcaps , 0 , sizeof ( * pcaps ) ) ;
	if (ice_fw_supports_report_dflt_cfg(pi->hw))
		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
					     pcaps, NULL);
	else
		status = ice_aq_get_phy_caps(pi, false,
					     ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					     pcaps, NULL);
	if (status) {
		dev_err(dev, "Failed to get PHY caps, VSI %d error %s\n",
			vsi->vsi_num, ice_stat_str(status));
err = - EIO ;
goto done ;
}
cfg = kzalloc ( sizeof ( * cfg ) , GFP_KERNEL ) ;
if ( ! cfg ) {
err = - ENOMEM ;
goto done ;
}
ice_copy_phy_caps_to_cfg ( pi , pcaps , cfg ) ;
	/* Speed - If default override pending, use curr_user_phy_cfg set in
	 * ice_init_phy_cfg_dflt_override.
	 */
	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
			       vsi->back->state)) {
		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
} else {
u64 phy_low = 0 , phy_high = 0 ;
ice_update_phy_type ( & phy_low , & phy_high ,
pi - > phy . curr_user_speed_req ) ;
cfg - > phy_type_low = pcaps - > phy_type_low & cpu_to_le64 ( phy_low ) ;
cfg - > phy_type_high = pcaps - > phy_type_high &
cpu_to_le64 ( phy_high ) ;
}
/* Can't provide what was requested; use PHY capabilities */
if ( ! cfg - > phy_type_low & & ! cfg - > phy_type_high ) {
cfg - > phy_type_low = pcaps - > phy_type_low ;
cfg - > phy_type_high = pcaps - > phy_type_high ;
}
/* FEC */
	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
/* Can't provide what was requested; use PHY capabilities */
if ( cfg - > link_fec_opt ! =
( cfg - > link_fec_opt & pcaps - > link_fec_options ) ) {
cfg - > caps | = pcaps - > caps & ICE_AQC_PHY_EN_AUTO_FEC ;
cfg - > link_fec_opt = pcaps - > link_fec_options ;
}
/* Flow Control - always supported; no need to check against
* capabilities
*/
	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
/* Enable link and link update */
cfg - > caps | = ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK ;
	status = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
if ( status ) {
dev_err ( dev , " Failed to set phy config, VSI %d error %s \n " ,
vsi - > vsi_num , ice_stat_str ( status ) ) ;
err = - EIO ;
}
kfree ( cfg ) ;
done :
kfree ( pcaps ) ;
return err ;
}
/**
* ice_check_media_subtask - Check for media
* @ pf : pointer to PF struct
*
 * If media is available, then initialize the PHY user configuration if it has
 * not been done yet, and configure the PHY if the interface is up.
*/
static void ice_check_media_subtask ( struct ice_pf * pf )
{
struct ice_port_info * pi ;
struct ice_vsi * vsi ;
int err ;
/* No need to check for media if it's already present */
if ( ! test_bit ( ICE_FLAG_NO_MEDIA , pf - > flags ) )
return ;
vsi = ice_get_main_vsi ( pf ) ;
if ( ! vsi )
return ;
/* Refresh link info and check if media is present */
pi = vsi - > port_info ;
err = ice_update_link_info ( pi ) ;
if ( err )
return ;
	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
if ( pi - > phy . link_info . link_info & ICE_AQ_MEDIA_AVAILABLE ) {
		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
			ice_init_phy_user_cfg(pi);
/* PHY settings are reset on media insertion, reconfigure
* PHY to preserve settings .
*/
		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
return ;
err = ice_configure_phy ( vsi ) ;
if ( ! err )
clear_bit ( ICE_FLAG_NO_MEDIA , pf - > flags ) ;
/* A Link Status Event will be generated; the event handler
* will complete bringing the interface up
*/
}
}
/**
* ice_service_task - manage and run subtasks
* @ work : pointer to work_struct contained by the PF struct
*/
static void ice_service_task ( struct work_struct * work )
{
struct ice_pf * pf = container_of ( work , struct ice_pf , serv_task ) ;
unsigned long start_time = jiffies ;
/* subtasks */
/* process reset requests first */
ice_reset_subtask ( pf ) ;
/* bail if a reset/recovery cycle is pending or rebuild failed */
	if (ice_is_reset_in_progress(pf->state) ||
	    test_bit(ICE_SUSPENDED, pf->state) ||
	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
ice_service_task_complete ( pf ) ;
return ;
}
ice_clean_adminq_subtask ( pf ) ;
ice_check_media_subtask ( pf ) ;
ice_check_for_hang_subtask ( pf ) ;
ice_sync_fltr_subtask ( pf ) ;
ice_handle_mdd_event ( pf ) ;
ice_watchdog_subtask ( pf ) ;
if ( ice_is_safe_mode ( pf ) ) {
ice_service_task_complete ( pf ) ;
return ;
}
ice_process_vflr_event ( pf ) ;
ice_clean_mailboxq_subtask ( pf ) ;
	ice_clean_sbq_subtask(pf);
ice_sync_arfs_fltrs ( pf ) ;
	ice_flush_fdir_ctx(pf);

	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
ice_service_task_complete ( pf ) ;
/* If the tasks have taken longer than one service timer period
* or there is more work to be done , reset the service timer to
* schedule the service task now .
*/
	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
	    test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
mod_timer ( & pf - > serv_tmr , jiffies ) ;
}
/**
* ice_set_ctrlq_len - helper function to set controlq length
* @ hw : pointer to the HW instance
*/
static void ice_set_ctrlq_len ( struct ice_hw * hw )
{
hw - > adminq . num_rq_entries = ICE_AQ_LEN ;
hw - > adminq . num_sq_entries = ICE_AQ_LEN ;
hw - > adminq . rq_buf_size = ICE_AQ_MAX_BUF_LEN ;
hw - > adminq . sq_buf_size = ICE_AQ_MAX_BUF_LEN ;
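	/* The mailbox receive queue is sized to the maximum count the ARQLEN
	 * register field can hold, presumably so bursts of VF mailbox
	 * messages are less likely to be dropped.
	 */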
hw - > mailboxq . num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M ;
hw - > mailboxq . num_sq_entries = ICE_MBXSQ_LEN ;
hw - > mailboxq . rq_buf_size = ICE_MBXQ_MAX_BUF_LEN ;
hw - > mailboxq . sq_buf_size = ICE_MBXQ_MAX_BUF_LEN ;
	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
}
/**
* ice_schedule_reset - schedule a reset
* @ pf : board private structure
* @ reset : reset being requested
*/
int ice_schedule_reset ( struct ice_pf * pf , enum ice_reset_req reset )
{
struct device * dev = ice_pf_to_dev ( pf ) ;
/* bail out if earlier reset has failed */
	if (test_bit(ICE_RESET_FAILED, pf->state)) {
dev_dbg ( dev , " earlier reset has failed \n " ) ;
return - EIO ;
}
/* bail if reset/recovery already in progress */
if ( ice_is_reset_in_progress ( pf - > state ) ) {
dev_dbg ( dev , " Reset already in progress \n " ) ;
return - EBUSY ;
}
	ice_unplug_aux_dev(pf);

switch ( reset ) {
case ICE_RESET_PFR :
		set_bit(ICE_PFR_REQ, pf->state);
break ;
case ICE_RESET_CORER :
		set_bit(ICE_CORER_REQ, pf->state);
break ;
case ICE_RESET_GLOBR :
		set_bit(ICE_GLOBR_REQ, pf->state);
break ;
default :
return - EINVAL ;
}
ice_service_task_schedule ( pf ) ;
return 0 ;
}
/**
* ice_irq_affinity_notify - Callback for affinity changes
* @ notify : context as to what irq was changed
* @ mask : the new affinity mask
*
* This is a callback function used by the irq_set_affinity_notifier function
* so that we may register to receive changes to the irq affinity masks .
*/
static void
ice_irq_affinity_notify ( struct irq_affinity_notify * notify ,
const cpumask_t * mask )
{
struct ice_q_vector * q_vector =
container_of ( notify , struct ice_q_vector , affinity_notify ) ;
cpumask_copy ( & q_vector - > affinity_mask , mask ) ;
}
/**
* ice_irq_affinity_release - Callback for affinity notifier release
* @ ref : internal core kernel usage
*
* This is a callback function used by the irq_set_affinity_notifier function
* to inform the current notification subscriber that they will no longer
* receive notifications .
*/
static void ice_irq_affinity_release ( struct kref __always_unused * ref ) { }
/**
* ice_vsi_ena_irq - Enable IRQ for the given VSI
* @ vsi : the VSI being configured
*/
static int ice_vsi_ena_irq ( struct ice_vsi * vsi )
{
struct ice_hw * hw = & vsi - > back - > hw ;
int i ;
ice_for_each_q_vector ( vsi , i )
ice_irq_dynamic_ena ( hw , vsi , vsi - > q_vectors [ i ] ) ;
ice_flush ( hw ) ;
return 0 ;
}
/**
* ice_vsi_req_irq_msix - get MSI - X vectors from the OS for the VSI
* @ vsi : the VSI being configured
* @ basename : name for the vector
*/
static int ice_vsi_req_irq_msix ( struct ice_vsi * vsi , char * basename )
{
int q_vectors = vsi - > num_q_vectors ;
struct ice_pf * pf = vsi - > back ;
int base = vsi - > base_vector ;
struct device * dev ;
int rx_int_idx = 0 ;
int tx_int_idx = 0 ;
int vector , err ;
int irq_num ;
dev = ice_pf_to_dev ( pf ) ;
for ( vector = 0 ; vector < q_vectors ; vector + + ) {
struct ice_q_vector * q_vector = vsi - > q_vectors [ vector ] ;
irq_num = pf - > msix_entries [ base + vector ] . vector ;
if ( q_vector - > tx . ring & & q_vector - > rx . ring ) {
snprintf ( q_vector - > name , sizeof ( q_vector - > name ) - 1 ,
" %s-%s-%d " , basename , " TxRx " , rx_int_idx + + ) ;
tx_int_idx + + ;
} else if ( q_vector - > rx . ring ) {
snprintf ( q_vector - > name , sizeof ( q_vector - > name ) - 1 ,
" %s-%s-%d " , basename , " rx " , rx_int_idx + + ) ;
} else if ( q_vector - > tx . ring ) {
snprintf ( q_vector - > name , sizeof ( q_vector - > name ) - 1 ,
" %s-%s-%d " , basename , " tx " , tx_int_idx + + ) ;
} else {
/* skip this unused q_vector */
continue ;
}
		if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID)
			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
					       IRQF_SHARED, q_vector->name,
					       q_vector);
		else
			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
					       0, q_vector->name, q_vector);
if ( err ) {
netdev_err ( vsi - > netdev , " MSIX request_irq failed, error: %d \n " ,
err ) ;
goto free_q_irqs ;
}
/* register for affinity change notifications */
if ( ! IS_ENABLED ( CONFIG_RFS_ACCEL ) ) {
struct irq_affinity_notify * affinity_notify ;
affinity_notify = & q_vector - > affinity_notify ;
affinity_notify - > notify = ice_irq_affinity_notify ;
affinity_notify - > release = ice_irq_affinity_release ;
irq_set_affinity_notifier ( irq_num , affinity_notify ) ;
}
/* assign the mask for this irq */
irq_set_affinity_hint ( irq_num , & q_vector - > affinity_mask ) ;
}
vsi - > irqs_ready = true ;
return 0 ;
free_q_irqs :
while ( vector ) {
vector - - ;
irq_num = pf - > msix_entries [ base + vector ] . vector ;
if ( ! IS_ENABLED ( CONFIG_RFS_ACCEL ) )
irq_set_affinity_notifier ( irq_num , NULL ) ;
irq_set_affinity_hint ( irq_num , NULL ) ;
devm_free_irq ( dev , irq_num , & vsi - > q_vectors [ vector ] ) ;
}
return err ;
}
/**
* ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
* @ vsi : VSI to setup Tx rings used by XDP
*
* Return 0 on success and negative value on error
*/
static int ice_xdp_alloc_setup_rings ( struct ice_vsi * vsi )
{
struct device * dev = ice_pf_to_dev ( vsi - > back ) ;
int i ;
for ( i = 0 ; i < vsi - > num_xdp_txq ; i + + ) {
u16 xdp_q_idx = vsi - > alloc_txq + i ;
struct ice_ring * xdp_ring ;
xdp_ring = kzalloc ( sizeof ( * xdp_ring ) , GFP_KERNEL ) ;
if ( ! xdp_ring )
goto free_xdp_rings ;
xdp_ring - > q_index = xdp_q_idx ;
xdp_ring - > reg_idx = vsi - > txq_map [ xdp_q_idx ] ;
xdp_ring - > ring_active = false ;
xdp_ring - > vsi = vsi ;
xdp_ring - > netdev = NULL ;
xdp_ring - > dev = dev ;
xdp_ring - > count = vsi - > num_tx_desc ;
WRITE_ONCE ( vsi - > xdp_rings [ i ] , xdp_ring ) ;
if ( ice_setup_tx_ring ( xdp_ring ) )
goto free_xdp_rings ;
ice_set_ring_xdp ( xdp_ring ) ;
xdp_ring - > xsk_pool = ice_xsk_pool ( xdp_ring ) ;
}
return 0 ;
free_xdp_rings :
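	/* unwind: free descriptors of any XDP rings that were fully set up
	 * before the failure (rings without descriptors are skipped)
	 */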
for ( ; i > = 0 ; i - - )
if ( vsi - > xdp_rings [ i ] & & vsi - > xdp_rings [ i ] - > desc )
ice_free_tx_ring ( vsi - > xdp_rings [ i ] ) ;
return - ENOMEM ;
}
/**
* ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
* @ vsi : VSI to set the bpf prog on
* @ prog : the bpf prog pointer
*/
static void ice_vsi_assign_bpf_prog ( struct ice_vsi * vsi , struct bpf_prog * prog )
{
struct bpf_prog * old_prog ;
int i ;
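	/* atomically swap in the new program and drop the reference on any
	 * program that was previously attached
	 */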
old_prog = xchg ( & vsi - > xdp_prog , prog ) ;
if ( old_prog )
bpf_prog_put ( old_prog ) ;
ice_for_each_rxq ( vsi , i )
WRITE_ONCE ( vsi - > rx_rings [ i ] - > xdp_prog , vsi - > xdp_prog ) ;
}
/**
* ice_prepare_xdp_rings - Allocate , configure and setup Tx rings for XDP
* @ vsi : VSI to bring up Tx rings used by XDP
* @ prog : bpf program that will be assigned to VSI
*
* Return 0 on success and negative value on error
*/
int ice_prepare_xdp_rings ( struct ice_vsi * vsi , struct bpf_prog * prog )
{
u16 max_txqs [ ICE_MAX_TRAFFIC_CLASS ] = { 0 } ;
int xdp_rings_rem = vsi - > num_xdp_txq ;
struct ice_pf * pf = vsi - > back ;
struct ice_qs_cfg xdp_qs_cfg = {
. qs_mutex = & pf - > avail_q_mutex ,
. pf_map = pf - > avail_txqs ,
. pf_map_size = pf - > max_pf_txqs ,
. q_count = vsi - > num_xdp_txq ,
. scatter_count = ICE_MAX_SCATTER_TXQS ,
. vsi_map = vsi - > txq_map ,
. vsi_map_offset = vsi - > alloc_txq ,
. mapping_mode = ICE_VSI_MAP_CONTIG
} ;
enum ice_status status ;
struct device * dev ;
int i , v_idx ;
dev = ice_pf_to_dev ( pf ) ;
vsi - > xdp_rings = devm_kcalloc ( dev , vsi - > num_xdp_txq ,
sizeof ( * vsi - > xdp_rings ) , GFP_KERNEL ) ;
if ( ! vsi - > xdp_rings )
return - ENOMEM ;
vsi - > xdp_mapping_mode = xdp_qs_cfg . mapping_mode ;
if ( __ice_vsi_get_qs ( & xdp_qs_cfg ) )
goto err_map_xdp ;
if ( ice_xdp_alloc_setup_rings ( vsi ) )
goto clear_xdp_rings ;
/* follow the logic from ice_vsi_map_rings_to_vectors */
ice_for_each_q_vector ( vsi , v_idx ) {
struct ice_q_vector * q_vector = vsi - > q_vectors [ v_idx ] ;
int xdp_rings_per_v , q_id , q_base ;
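		/* spread the remaining XDP rings as evenly as possible over
		 * the q_vectors that have not been assigned rings yet
		 */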
xdp_rings_per_v = DIV_ROUND_UP ( xdp_rings_rem ,
vsi - > num_q_vectors - v_idx ) ;
q_base = vsi - > num_xdp_txq - xdp_rings_rem ;
for ( q_id = q_base ; q_id < ( q_base + xdp_rings_per_v ) ; q_id + + ) {
struct ice_ring * xdp_ring = vsi - > xdp_rings [ q_id ] ;
xdp_ring - > q_vector = q_vector ;
xdp_ring - > next = q_vector - > tx . ring ;
q_vector - > tx . ring = xdp_ring ;
}
xdp_rings_rem - = xdp_rings_per_v ;
}
/* omit the scheduler update if in reset path; XDP queues will be
* taken into account at the end of ice_vsi_rebuild , where
* ice_cfg_vsi_lan is being called
*/
if ( ice_is_reset_in_progress ( pf - > state ) )
return 0 ;
/* tell the Tx scheduler that right now we have
* additional queues
*/
for ( i = 0 ; i < vsi - > tc_cfg . numtc ; i + + )
max_txqs [ i ] = vsi - > num_txq + vsi - > num_xdp_txq ;
status = ice_cfg_vsi_lan ( vsi - > port_info , vsi - > idx , vsi - > tc_cfg . ena_tc ,
max_txqs ) ;
if ( status ) {
dev_err ( dev , " Failed VSI LAN queue config for XDP, error: %s \n " ,
ice_stat_str ( status ) ) ;
goto clear_xdp_rings ;
}
ice_vsi_assign_bpf_prog ( vsi , prog ) ;
return 0 ;
clear_xdp_rings :
for ( i = 0 ; i < vsi - > num_xdp_txq ; i + + )
if ( vsi - > xdp_rings [ i ] ) {
kfree_rcu ( vsi - > xdp_rings [ i ] , rcu ) ;
vsi - > xdp_rings [ i ] = NULL ;
}
err_map_xdp :
mutex_lock ( & pf - > avail_q_mutex ) ;
for ( i = 0 ; i < vsi - > num_xdp_txq ; i + + ) {
clear_bit ( vsi - > txq_map [ i + vsi - > alloc_txq ] , pf - > avail_txqs ) ;
vsi - > txq_map [ i + vsi - > alloc_txq ] = ICE_INVAL_Q_INDEX ;
}
mutex_unlock ( & pf - > avail_q_mutex ) ;
devm_kfree ( dev , vsi - > xdp_rings ) ;
return - ENOMEM ;
}
/**
* ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
* @ vsi : VSI to remove XDP rings
*
* Detach XDP rings from irq vectors , clean up the PF bitmap and free
* resources
*/
int ice_destroy_xdp_rings ( struct ice_vsi * vsi )
{
u16 max_txqs [ ICE_MAX_TRAFFIC_CLASS ] = { 0 } ;
struct ice_pf * pf = vsi - > back ;
int i , v_idx ;
	/* q_vectors are freed in reset path so there's no point in detaching
	 * rings; in case of rebuild being triggered not from reset, bits in
	 * pf->state won't be set, so additionally check the first q_vector
	 * against NULL
	 */
if ( ice_is_reset_in_progress ( pf - > state ) | | ! vsi - > q_vectors [ 0 ] )
goto free_qmap ;
ice_for_each_q_vector ( vsi , v_idx ) {
struct ice_q_vector * q_vector = vsi - > q_vectors [ v_idx ] ;
struct ice_ring * ring ;
ice_for_each_ring ( ring , q_vector - > tx )
if ( ! ring - > tx_buf | | ! ice_ring_is_xdp ( ring ) )
break ;
/* restore the value of last node prior to XDP setup */
q_vector - > tx . ring = ring ;
}
free_qmap :
mutex_lock ( & pf - > avail_q_mutex ) ;
for ( i = 0 ; i < vsi - > num_xdp_txq ; i + + ) {
clear_bit ( vsi - > txq_map [ i + vsi - > alloc_txq ] , pf - > avail_txqs ) ;
vsi - > txq_map [ i + vsi - > alloc_txq ] = ICE_INVAL_Q_INDEX ;
}
mutex_unlock ( & pf - > avail_q_mutex ) ;
for ( i = 0 ; i < vsi - > num_xdp_txq ; i + + )
if ( vsi - > xdp_rings [ i ] ) {
if ( vsi - > xdp_rings [ i ] - > desc )
ice_free_tx_ring ( vsi - > xdp_rings [ i ] ) ;
kfree_rcu ( vsi - > xdp_rings [ i ] , rcu ) ;
vsi - > xdp_rings [ i ] = NULL ;
}
devm_kfree ( ice_pf_to_dev ( pf ) , vsi - > xdp_rings ) ;
vsi - > xdp_rings = NULL ;
if ( ice_is_reset_in_progress ( pf - > state ) | | ! vsi - > q_vectors [ 0 ] )
return 0 ;
ice_vsi_assign_bpf_prog ( vsi , NULL ) ;
/* notify Tx scheduler that we destroyed XDP queues and bring
* back the old number of child nodes
*/
for ( i = 0 ; i < vsi - > tc_cfg . numtc ; i + + )
max_txqs [ i ] = vsi - > num_txq ;
/* change number of XDP Tx queues to 0 */
vsi - > num_xdp_txq = 0 ;
return ice_cfg_vsi_lan ( vsi - > port_info , vsi - > idx , vsi - > tc_cfg . ena_tc ,
max_txqs ) ;
}
/**
* ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
* @ vsi : VSI to schedule napi on
*/
static void ice_vsi_rx_napi_schedule ( struct ice_vsi * vsi )
{
int i ;
ice_for_each_rxq ( vsi , i ) {
struct ice_ring * rx_ring = vsi - > rx_rings [ i ] ;
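		/* only rings backed by an AF_XDP buffer pool need a kick, so
		 * that buffers already posted to the fill queue get processed
		 */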
if ( rx_ring - > xsk_pool )
napi_schedule ( & rx_ring - > q_vector - > napi ) ;
}
}
/**
* ice_xdp_setup_prog - Add or remove XDP eBPF program
* @ vsi : VSI to setup XDP for
* @ prog : XDP program
* @ extack : netlink extended ack
*/
static int
ice_xdp_setup_prog ( struct ice_vsi * vsi , struct bpf_prog * prog ,
struct netlink_ext_ack * extack )
{
int frame_size = vsi - > netdev - > mtu + ICE_ETH_PKT_HDR_PAD ;
bool if_running = netif_running ( vsi - > netdev ) ;
int ret = 0 , xdp_ring_err = 0 ;
if ( frame_size > vsi - > rx_buf_len ) {
NL_SET_ERR_MSG_MOD ( extack , " MTU too large for loading XDP " ) ;
return - EOPNOTSUPP ;
}
/* need to stop netdev while setting up the program for Rx rings */
	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
ret = ice_down ( vsi ) ;
if ( ret ) {
NL_SET_ERR_MSG_MOD ( extack , " Preparing device for XDP attach failed " ) ;
return ret ;
}
}
if ( ! ice_is_xdp_ena_vsi ( vsi ) & & prog ) {
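		/* allocate one XDP Tx queue per Rx queue so every Rx ring has
		 * a dedicated ring for XDP_TX frames
		 */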
vsi - > num_xdp_txq = vsi - > alloc_rxq ;
xdp_ring_err = ice_prepare_xdp_rings ( vsi , prog ) ;
if ( xdp_ring_err )
NL_SET_ERR_MSG_MOD ( extack , " Setting up XDP Tx resources failed " ) ;
} else if ( ice_is_xdp_ena_vsi ( vsi ) & & ! prog ) {
xdp_ring_err = ice_destroy_xdp_rings ( vsi ) ;
if ( xdp_ring_err )
NL_SET_ERR_MSG_MOD ( extack , " Freeing XDP Tx resources failed " ) ;
} else {
ice_vsi_assign_bpf_prog ( vsi , prog ) ;
}
if ( if_running )
ret = ice_up ( vsi ) ;
if ( ! ret & & prog )
ice_vsi_rx_napi_schedule ( vsi ) ;
return ( ret | | xdp_ring_err ) ? - ENOMEM : 0 ;
}
/**
* ice_xdp_safe_mode - XDP handler for safe mode
* @ dev : netdevice
* @ xdp : XDP command
*/
static int ice_xdp_safe_mode ( struct net_device __always_unused * dev ,
struct netdev_bpf * xdp )
{
NL_SET_ERR_MSG_MOD ( xdp - > extack ,
" Please provide working DDP firmware package in order to use XDP \n "
" Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst " ) ;
return - EOPNOTSUPP ;
}
/**
* ice_xdp - implements XDP handler
* @ dev : netdevice
* @ xdp : XDP command
*/
static int ice_xdp ( struct net_device * dev , struct netdev_bpf * xdp )
{
struct ice_netdev_priv * np = netdev_priv ( dev ) ;
struct ice_vsi * vsi = np - > vsi ;
if ( vsi - > type ! = ICE_VSI_PF ) {
NL_SET_ERR_MSG_MOD ( xdp - > extack , " XDP can be loaded only on PF VSI " ) ;
return - EINVAL ;
}
switch ( xdp - > command ) {
case XDP_SETUP_PROG :
return ice_xdp_setup_prog ( vsi , xdp - > prog , xdp - > extack ) ;
case XDP_SETUP_XSK_POOL :
return ice_xsk_pool_setup ( vsi , xdp - > xsk . pool ,
xdp - > xsk . queue_id ) ;
default :
return - EINVAL ;
}
}
/**
* ice_ena_misc_vector - enable the non - queue interrupts
* @ pf : board private structure
*/
static void ice_ena_misc_vector ( struct ice_pf * pf )
{
struct ice_hw * hw = & pf - > hw ;
u32 val ;
	/* Disable anti-spoof detection interrupt to prevent spurious event
	 * interrupts during a function reset. Anti-spoof functionality is
	 * still supported.
	 */
val = rd32 ( hw , GL_MDCK_TX_TDPU ) ;
val | = GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M ;
wr32 ( hw , GL_MDCK_TX_TDPU , val ) ;
/* clear things first */
wr32 ( hw , PFINT_OICR_ENA , 0 ) ; /* disable all */
rd32 ( hw , PFINT_OICR ) ; /* read to clear */
val = ( PFINT_OICR_ECC_ERR_M |
PFINT_OICR_MAL_DETECT_M |
PFINT_OICR_GRST_M |
PFINT_OICR_PCI_EXCEPTION_M |
PFINT_OICR_VFLR_M |
PFINT_OICR_HMC_ERR_M |
	       PFINT_OICR_PE_PUSH_M |
PFINT_OICR_PE_CRITERR_M ) ;
wr32 ( hw , PFINT_OICR_ENA , val ) ;
/* SW_ITR_IDX = 0, but don't change INTENA */
wr32 ( hw , GLINT_DYN_CTL ( pf - > oicr_idx ) ,
GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M ) ;
}
/**
* ice_misc_intr - misc interrupt handler
* @ irq : interrupt number
* @ data : pointer to a q_vector
*/
static irqreturn_t ice_misc_intr ( int __always_unused irq , void * data )
{
struct ice_pf * pf = ( struct ice_pf * ) data ;
struct ice_hw * hw = & pf - > hw ;
irqreturn_t ret = IRQ_NONE ;
struct device * dev ;
u32 oicr , ena_mask ;
dev = ice_pf_to_dev ( pf ) ;
	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
oicr = rd32 ( hw , PFINT_OICR ) ;
ena_mask = rd32 ( hw , PFINT_OICR_ENA ) ;
if ( oicr & PFINT_OICR_SWINT_M ) {
ena_mask & = ~ PFINT_OICR_SWINT_M ;
pf - > sw_int_count + + ;
}
if ( oicr & PFINT_OICR_MAL_DETECT_M ) {
ena_mask & = ~ PFINT_OICR_MAL_DETECT_M ;
		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
}
if ( oicr & PFINT_OICR_VFLR_M ) {
/* disable any further VFLR event notifications */
		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
u32 reg = rd32 ( hw , PFINT_OICR_ENA ) ;
reg & = ~ PFINT_OICR_VFLR_M ;
wr32 ( hw , PFINT_OICR_ENA , reg ) ;
} else {
ena_mask & = ~ PFINT_OICR_VFLR_M ;
			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
}
}
if ( oicr & PFINT_OICR_GRST_M ) {
u32 reset ;
/* we have a reset warning */
ena_mask & = ~ PFINT_OICR_GRST_M ;
reset = ( rd32 ( hw , GLGEN_RSTAT ) & GLGEN_RSTAT_RESET_TYPE_M ) > >
GLGEN_RSTAT_RESET_TYPE_S ;
if ( reset = = ICE_RESET_CORER )
pf - > corer_count + + ;
else if ( reset = = ICE_RESET_GLOBR )
pf - > globr_count + + ;
else if ( reset = = ICE_RESET_EMPR )
pf - > empr_count + + ;
else
dev_dbg ( dev , " Invalid reset type %d \n " , reset ) ;
/* If a reset cycle isn't already in progress, we set a bit in
* pf - > state so that the service task can start a reset / rebuild .
*/
		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
			if (reset == ICE_RESET_CORER)
				set_bit(ICE_CORER_RECV, pf->state);
			else if (reset == ICE_RESET_GLOBR)
				set_bit(ICE_GLOBR_RECV, pf->state);
			else
				set_bit(ICE_EMPR_RECV, pf->state);
			/* There are a couple of different bits at play here.
* hw - > reset_ongoing indicates whether the hardware is
* in reset . This is set to true when a reset interrupt
* is received and set back to false after the driver
* has determined that the hardware is out of reset .
*
			 * ICE_RESET_OICR_RECV in pf->state indicates
* that a post reset rebuild is required before the
* driver is operational again . This is set above .
*
* As this is the start of the reset / rebuild cycle , set
* both to indicate that .
*/
hw - > reset_ongoing = true ;
}
}
if ( oicr & PFINT_OICR_TSYN_TX_M ) {
ena_mask & = ~ PFINT_OICR_TSYN_TX_M ;
ice_ptp_process_ts ( pf ) ;
}
if ( oicr & PFINT_OICR_TSYN_EVNT_M ) {
u8 tmr_idx = hw - > func_caps . ts_func_info . tmr_index_owned ;
u32 gltsyn_stat = rd32 ( hw , GLTSYN_STAT ( tmr_idx ) ) ;
/* Save EVENTs from GTSYN register */
pf - > ptp . ext_ts_irq | = gltsyn_stat & ( GLTSYN_STAT_EVENT0_M |
GLTSYN_STAT_EVENT1_M |
GLTSYN_STAT_EVENT2_M ) ;
ena_mask & = ~ PFINT_OICR_TSYN_EVNT_M ;
kthread_queue_work ( pf - > ptp . kworker , & pf - > ptp . extts_work ) ;
}
# define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
if ( oicr & ICE_AUX_CRIT_ERR ) {
struct iidc_event * event ;
ena_mask & = ~ ICE_AUX_CRIT_ERR ;
event = kzalloc ( sizeof ( * event ) , GFP_KERNEL ) ;
if ( event ) {
set_bit ( IIDC_EVENT_CRIT_ERR , event - > type ) ;
/* report the entire OICR value to AUX driver */
event - > reg = oicr ;
ice_send_event_to_aux ( pf , event ) ;
kfree ( event ) ;
}
}
/* Report any remaining unexpected interrupts */
oicr & = ena_mask ;
if ( oicr ) {
dev_dbg ( dev , " unhandled interrupt oicr=0x%08x \n " , oicr ) ;
/* If a critical error is pending there is no choice but to
* reset the device .
*/
		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
			    PFINT_OICR_ECC_ERR_M)) {
			set_bit(ICE_PFR_REQ, pf->state);
			ice_service_task_schedule(pf);
}
}
ret = IRQ_HANDLED ;
ice_service_task_schedule ( pf ) ;
ice_irq_dynamic_ena ( hw , NULL , NULL ) ;
return ret ;
}
/**
* ice_dis_ctrlq_interrupts - disable control queue interrupts
* @ hw : pointer to HW structure
*/
static void ice_dis_ctrlq_interrupts ( struct ice_hw * hw )
{
/* disable Admin queue Interrupt causes */
wr32 ( hw , PFINT_FW_CTL ,
rd32 ( hw , PFINT_FW_CTL ) & ~ PFINT_FW_CTL_CAUSE_ENA_M ) ;
/* disable Mailbox queue Interrupt causes */
wr32 ( hw , PFINT_MBX_CTL ,
rd32 ( hw , PFINT_MBX_CTL ) & ~ PFINT_MBX_CTL_CAUSE_ENA_M ) ;
	wr32(hw, PFINT_SB_CTL,
	     rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
/* disable Control queue Interrupt causes */
wr32 ( hw , PFINT_OICR_CTL ,
rd32 ( hw , PFINT_OICR_CTL ) & ~ PFINT_OICR_CTL_CAUSE_ENA_M ) ;
ice_flush ( hw ) ;
}
/**
* ice_free_irq_msix_misc - Unroll misc vector setup
* @ pf : board private structure
*/
static void ice_free_irq_msix_misc ( struct ice_pf * pf )
{
struct ice_hw * hw = & pf - > hw ;
ice_dis_ctrlq_interrupts ( hw ) ;
/* disable OICR interrupt */
wr32 ( hw , PFINT_OICR_ENA , 0 ) ;
ice_flush ( hw ) ;
if ( pf - > msix_entries ) {
synchronize_irq ( pf - > msix_entries [ pf - > oicr_idx ] . vector ) ;
devm_free_irq ( ice_pf_to_dev ( pf ) ,
pf - > msix_entries [ pf - > oicr_idx ] . vector , pf ) ;
}
pf - > num_avail_sw_msix + = 1 ;
ice_free_res ( pf - > irq_tracker , pf - > oicr_idx , ICE_RES_MISC_VEC_ID ) ;
}
/**
* ice_ena_ctrlq_interrupts - enable control queue interrupts
* @ hw : pointer to HW structure
* @ reg_idx : HW vector index to associate the control queue interrupts with
*/
static void ice_ena_ctrlq_interrupts ( struct ice_hw * hw , u16 reg_idx )
{
u32 val ;
val = ( ( reg_idx & PFINT_OICR_CTL_MSIX_INDX_M ) |
PFINT_OICR_CTL_CAUSE_ENA_M ) ;
wr32 ( hw , PFINT_OICR_CTL , val ) ;
/* enable Admin queue Interrupt causes */
val = ( ( reg_idx & PFINT_FW_CTL_MSIX_INDX_M ) |
PFINT_FW_CTL_CAUSE_ENA_M ) ;
wr32 ( hw , PFINT_FW_CTL , val ) ;
/* enable Mailbox queue Interrupt causes */
val = ( ( reg_idx & PFINT_MBX_CTL_MSIX_INDX_M ) |
PFINT_MBX_CTL_CAUSE_ENA_M ) ;
wr32 ( hw , PFINT_MBX_CTL , val ) ;
	/* This enables Sideband queue Interrupt causes */
	val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
	       PFINT_SB_CTL_CAUSE_ENA_M);
	wr32(hw, PFINT_SB_CTL, val);
ice_flush ( hw ) ;
}
/**
* ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
* @ pf : board private structure
*
* This sets up the handler for MSIX 0 , which is used to manage the
* non - queue interrupts , e . g . AdminQ and errors . This is not used
* when in MSI or Legacy interrupt mode .
*/
static int ice_req_irq_msix_misc ( struct ice_pf * pf )
{
struct device * dev = ice_pf_to_dev ( pf ) ;
struct ice_hw * hw = & pf - > hw ;
int oicr_idx , err = 0 ;
if ( ! pf - > int_name [ 0 ] )
snprintf ( pf - > int_name , sizeof ( pf - > int_name ) - 1 , " %s-%s:misc " ,
dev_driver_string ( dev ) , dev_name ( dev ) ) ;
/* Do not request IRQ but do enable OICR interrupt since settings are
* lost during reset . Note that this function is called only during
* rebuild path and not while reset is in progress .
*/
if ( ice_is_reset_in_progress ( pf - > state ) )
goto skip_req_irq ;
/* reserve one vector in irq_tracker for misc interrupts */
oicr_idx = ice_get_res ( pf , pf - > irq_tracker , 1 , ICE_RES_MISC_VEC_ID ) ;
if ( oicr_idx < 0 )
return oicr_idx ;
pf - > num_avail_sw_msix - = 1 ;
pf - > oicr_idx = ( u16 ) oicr_idx ;
err = devm_request_irq ( dev , pf - > msix_entries [ pf - > oicr_idx ] . vector ,
ice_misc_intr , 0 , pf - > int_name , pf ) ;
if ( err ) {
dev_err ( dev , " devm_request_irq for %s failed: %d \n " ,
pf - > int_name , err ) ;
ice_free_res ( pf - > irq_tracker , 1 , ICE_RES_MISC_VEC_ID ) ;
pf - > num_avail_sw_msix + = 1 ;
return err ;
}
skip_req_irq :
ice_ena_misc_vector ( pf ) ;
ice_ena_ctrlq_interrupts ( hw , pf - > oicr_idx ) ;
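	/* program an initial interrupt throttling interval (ITR) for the
	 * misc/OICR vector
	 */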
wr32 ( hw , GLINT_ITR ( ICE_RX_ITR , pf - > oicr_idx ) ,
ITR_REG_ALIGN ( ICE_ITR_8K ) > > ICE_ITR_GRAN_S ) ;
ice_flush ( hw ) ;
ice_irq_dynamic_ena ( hw , NULL , NULL ) ;
return 0 ;
}
/**
* ice_napi_add - register NAPI handler for the VSI
* @ vsi : VSI for which NAPI handler is to be registered
*
* This function is only called in the driver ' s load path . Registering the NAPI
* handler is done in ice_vsi_alloc_q_vector ( ) for all other cases ( i . e . resume ,
* reset / rebuild , etc . )
*/
static void ice_napi_add ( struct ice_vsi * vsi )
{
int v_idx ;
if ( ! vsi - > netdev )
return ;
ice_for_each_q_vector ( vsi , v_idx )
netif_napi_add ( vsi - > netdev , & vsi - > q_vectors [ v_idx ] - > napi ,
ice_napi_poll , NAPI_POLL_WEIGHT ) ;
}
/**
* ice_set_ops - set netdev and ethtools ops for the given netdev
* @ netdev : netdev instance
*/
static void ice_set_ops ( struct net_device * netdev )
{
struct ice_pf * pf = ice_netdev_to_pf ( netdev ) ;
if ( ice_is_safe_mode ( pf ) ) {
netdev - > netdev_ops = & ice_netdev_safe_mode_ops ;
ice_set_ethtool_safe_mode_ops ( netdev ) ;
return ;
}
netdev - > netdev_ops = & ice_netdev_ops ;
netdev - > udp_tunnel_nic_info = & pf - > hw . udp_tunnel_nic ;
ice_set_ethtool_ops ( netdev ) ;
}
/**
* ice_set_netdev_features - set features for the given netdev
* @ netdev : netdev instance
*/
static void ice_set_netdev_features ( struct net_device * netdev )
{
struct ice_pf * pf = ice_netdev_to_pf ( netdev ) ;
netdev_features_t csumo_features ;
netdev_features_t vlano_features ;
netdev_features_t dflt_features ;
netdev_features_t tso_features ;
if ( ice_is_safe_mode ( pf ) ) {
/* safe mode */
netdev - > features = NETIF_F_SG | NETIF_F_HIGHDMA ;
netdev - > hw_features = netdev - > features ;
return ;
}
dflt_features = NETIF_F_SG |
NETIF_F_HIGHDMA |
NETIF_F_NTUPLE |
NETIF_F_RXHASH ;
csumo_features = NETIF_F_RXCSUM |
NETIF_F_IP_CSUM |
NETIF_F_SCTP_CRC |
NETIF_F_IPV6_CSUM ;
vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX ;
tso_features = NETIF_F_TSO |
NETIF_F_TSO_ECN |
NETIF_F_TSO6 |
NETIF_F_GSO_GRE |
NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_GSO_PARTIAL |
NETIF_F_GSO_IPXIP4 |
NETIF_F_GSO_IPXIP6 |
NETIF_F_GSO_UDP_L4 ;
netdev - > gso_partial_features | = NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_GSO_GRE_CSUM ;
/* set features that user can change */
netdev - > hw_features = dflt_features | csumo_features |
vlano_features | tso_features ;
/* add support for HW_CSUM on packets with MPLS header */
netdev - > mpls_features = NETIF_F_HW_CSUM ;
/* enable features */
netdev - > features | = netdev - > hw_features ;
/* encap and VLAN devices inherit default, csumo and tso features */
netdev - > hw_enc_features | = dflt_features | csumo_features |
tso_features ;
netdev - > vlan_features | = dflt_features | csumo_features |
tso_features ;
}
/**
* ice_cfg_netdev - Allocate , configure and register a netdev
* @ vsi : the VSI associated with the new netdev
*
* Returns 0 on success , negative value on failure
*/
static int ice_cfg_netdev ( struct ice_vsi * vsi )
{
struct ice_netdev_priv * np ;
struct net_device * netdev ;
u8 mac_addr [ ETH_ALEN ] ;
netdev = alloc_etherdev_mqs ( sizeof ( * np ) , vsi - > alloc_txq ,
vsi - > alloc_rxq ) ;
	if (!netdev)
		return -ENOMEM;

	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
vsi - > netdev = netdev ;
np = netdev_priv ( netdev ) ;
np - > vsi = vsi ;
ice_set_netdev_features ( netdev ) ;
ice_set_ops ( netdev ) ;
if ( vsi - > type = = ICE_VSI_PF ) {
		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
ether_addr_copy ( mac_addr , vsi - > port_info - > mac . perm_addr ) ;
ether_addr_copy ( netdev - > dev_addr , mac_addr ) ;
ether_addr_copy ( netdev - > perm_addr , mac_addr ) ;
}
netdev - > priv_flags | = IFF_UNICAST_FLT ;
/* Setup netdev TC information */
ice_vsi_cfg_netdev_tc ( vsi , vsi - > tc_cfg . ena_tc ) ;
/* setup watchdog timeout value to be 5 second */
netdev - > watchdog_timeo = 5 * HZ ;
netdev - > min_mtu = ETH_MIN_MTU ;
netdev - > max_mtu = ICE_MAX_MTU ;
return 0 ;
}
/**
* ice_fill_rss_lut - Fill the RSS lookup table with default values
* @ lut : Lookup table
* @ rss_table_size : Lookup table size
* @ rss_size : Range of queue number for hashing
*/
void ice_fill_rss_lut ( u8 * lut , u16 rss_table_size , u16 rss_size )
{
u16 i ;
for ( i = 0 ; i < rss_table_size ; i + + )
lut [ i ] = i % rss_size ;
}
/**
* ice_pf_vsi_setup - Set up a PF VSI
* @ pf : board private structure
* @ pi : pointer to the port_info instance
*
* Returns pointer to the successfully allocated VSI software struct
* on success , otherwise returns NULL on failure .
*/
static struct ice_vsi *
ice_pf_vsi_setup ( struct ice_pf * pf , struct ice_port_info * pi )
{
return ice_vsi_setup ( pf , pi , ICE_VSI_PF , ICE_INVAL_VFID ) ;
}
/**
* ice_ctrl_vsi_setup - Set up a control VSI
* @ pf : board private structure
* @ pi : pointer to the port_info instance
*
* Returns pointer to the successfully allocated VSI software struct
* on success , otherwise returns NULL on failure .
*/
static struct ice_vsi *
ice_ctrl_vsi_setup ( struct ice_pf * pf , struct ice_port_info * pi )
{
return ice_vsi_setup ( pf , pi , ICE_VSI_CTRL , ICE_INVAL_VFID ) ;
}
/**
* ice_lb_vsi_setup - Set up a loopback VSI
* @ pf : board private structure
* @ pi : pointer to the port_info instance
*
* Returns pointer to the successfully allocated VSI software struct
* on success , otherwise returns NULL on failure .
*/
struct ice_vsi *
ice_lb_vsi_setup ( struct ice_pf * pf , struct ice_port_info * pi )
{
return ice_vsi_setup ( pf , pi , ICE_VSI_LB , ICE_INVAL_VFID ) ;
}
/**
* ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
* @ netdev : network interface to be adjusted
* @ proto : unused protocol
* @ vid : VLAN ID to be added
*
* net_device_ops implementation for adding VLAN IDs
*/
static int
ice_vlan_rx_add_vid ( struct net_device * netdev , __always_unused __be16 proto ,
u16 vid )
{
struct ice_netdev_priv * np = netdev_priv ( netdev ) ;
struct ice_vsi * vsi = np - > vsi ;
int ret ;
/* VLAN 0 is added by default during load/reset */
if ( ! vid )
return 0 ;
/* Enable VLAN pruning when a VLAN other than 0 is added */
if ( ! ice_vsi_is_vlan_pruning_ena ( vsi ) ) {
ret = ice_cfg_vlan_pruning ( vsi , true , false ) ;
if ( ret )
return ret ;
}
/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
* packets aren ' t pruned by the device ' s internal switch on Rx
*/
ret = ice_vsi_add_vlan ( vsi , vid , ICE_FWD_TO_VSI ) ;
if ( ! ret )
		set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
return ret ;
}
/**
* ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
* @ netdev : network interface to be adjusted
* @ proto : unused protocol
* @ vid : VLAN ID to be removed
*
* net_device_ops implementation for removing VLAN IDs
*/
static int
ice_vlan_rx_kill_vid ( struct net_device * netdev , __always_unused __be16 proto ,
u16 vid )
{
struct ice_netdev_priv * np = netdev_priv ( netdev ) ;
struct ice_vsi * vsi = np - > vsi ;
int ret ;
/* don't allow removal of VLAN 0 */
if ( ! vid )
return 0 ;
/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
* information
*/
ret = ice_vsi_kill_vlan ( vsi , vid ) ;
if ( ret )
return ret ;
/* Disable pruning when VLAN 0 is the only VLAN rule */
if ( vsi - > num_vlan = = 1 & & ice_vsi_is_vlan_pruning_ena ( vsi ) )
ret = ice_cfg_vlan_pruning ( vsi , false , false ) ;
	set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);

return ret ;
}
/**
* ice_setup_pf_sw - Setup the HW switch on startup or after reset
* @ pf : board private structure
*
* Returns 0 on success , negative value on failure
*/
static int ice_setup_pf_sw ( struct ice_pf * pf )
{
struct ice_vsi * vsi ;
int status = 0 ;
if ( ice_is_reset_in_progress ( pf - > state ) )
return - EBUSY ;
vsi = ice_pf_vsi_setup ( pf , pf - > hw . port_info ) ;
if ( ! vsi )
return - ENOMEM ;
status = ice_cfg_netdev ( vsi ) ;
if ( status ) {
status = - ENODEV ;
goto unroll_vsi_setup ;
}
/* netdev has to be configured before setting frame size */
ice_vsi_cfg_frame_size ( vsi ) ;
/* Setup DCB netlink interface */
ice_dcbnl_setup ( vsi ) ;
/* registering the NAPI handler requires both the queues and
* netdev to be created , which are done in ice_pf_vsi_setup ( )
* and ice_cfg_netdev ( ) respectively
*/
ice_napi_add ( vsi ) ;
status = ice_set_cpu_rx_rmap ( vsi ) ;
if ( status ) {
dev_err ( ice_pf_to_dev ( pf ) , " Failed to set CPU Rx map VSI %d error %d \n " ,
vsi - > vsi_num , status ) ;
status = - EINVAL ;
goto unroll_napi_add ;
}
status = ice_init_mac_fltr ( pf ) ;
if ( status )
goto free_cpu_rx_map ;
return status ;
free_cpu_rx_map :
ice_free_cpu_rx_rmap ( vsi ) ;
unroll_napi_add :
if ( vsi ) {
ice_napi_del ( vsi ) ;
if ( vsi - > netdev ) {
			clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
free_netdev ( vsi - > netdev ) ;
vsi - > netdev = NULL ;
}
}
unroll_vsi_setup :
ice_vsi_release ( vsi ) ;
return status ;
}
/**
* ice_get_avail_q_count - Get count of queues in use
* @ pf_qmap : bitmap to get queue use count from
* @ lock : pointer to a mutex that protects access to pf_qmap
* @ size : size of the bitmap
*/
static u16
ice_get_avail_q_count ( unsigned long * pf_qmap , struct mutex * lock , u16 size )
{
unsigned long bit ;
u16 count = 0 ;
mutex_lock ( lock ) ;
for_each_clear_bit ( bit , pf_qmap , size )
count + + ;
mutex_unlock ( lock ) ;
return count ;
}
/**
* ice_get_avail_txq_count - Get count of Tx queues in use
* @ pf : pointer to an ice_pf instance
*/
u16 ice_get_avail_txq_count ( struct ice_pf * pf )
{
return ice_get_avail_q_count ( pf - > avail_txqs , & pf - > avail_q_mutex ,
pf - > max_pf_txqs ) ;
}
/**
* ice_get_avail_rxq_count - Get count of Rx queues in use
* @ pf : pointer to an ice_pf instance
*/
u16 ice_get_avail_rxq_count ( struct ice_pf * pf )
{
return ice_get_avail_q_count ( pf - > avail_rxqs , & pf - > avail_q_mutex ,
pf - > max_pf_rxqs ) ;
}
/**
 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
* @ pf : board private structure to initialize
*/
static void ice_deinit_pf ( struct ice_pf * pf )
{
ice_service_task_stop ( pf ) ;
mutex_destroy ( & pf - > sw_mutex ) ;
mutex_destroy ( & pf - > tc_mutex ) ;
mutex_destroy ( & pf - > avail_q_mutex ) ;
if ( pf - > avail_txqs ) {
bitmap_free ( pf - > avail_txqs ) ;
pf - > avail_txqs = NULL ;
}
if ( pf - > avail_rxqs ) {
bitmap_free ( pf - > avail_rxqs ) ;
pf - > avail_rxqs = NULL ;
}
	if (pf->ptp.clock)
		ptp_clock_unregister(pf->ptp.clock);
}
/**
* ice_set_pf_caps - set PFs capability flags
* @ pf : pointer to the PF instance
*/
static void ice_set_pf_caps ( struct ice_pf * pf )
{
struct ice_hw_func_caps * func_caps = & pf - > hw . func_caps ;
	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
	clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
	if (func_caps->common_cap.rdma) {
		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
		set_bit(ICE_FLAG_AUX_ENA, pf->flags);
	}
clear_bit ( ICE_FLAG_DCB_CAPABLE , pf - > flags ) ;
if ( func_caps - > common_cap . dcb )
set_bit ( ICE_FLAG_DCB_CAPABLE , pf - > flags ) ;
clear_bit ( ICE_FLAG_SRIOV_CAPABLE , pf - > flags ) ;
if ( func_caps - > common_cap . sr_iov_1_1 ) {
set_bit ( ICE_FLAG_SRIOV_CAPABLE , pf - > flags ) ;
pf - > num_vfs_supported = min_t ( int , func_caps - > num_allocd_vfs ,
ICE_MAX_VF_COUNT ) ;
}
clear_bit ( ICE_FLAG_RSS_ENA , pf - > flags ) ;
if ( func_caps - > common_cap . rss_table_size )
set_bit ( ICE_FLAG_RSS_ENA , pf - > flags ) ;
clear_bit ( ICE_FLAG_FD_ENA , pf - > flags ) ;
if ( func_caps - > fd_fltr_guar > 0 | | func_caps - > fd_fltr_best_effort > 0 ) {
u16 unused ;
/* ctrl_vsi_idx will be set to a valid value when flow director
* is setup by ice_init_fdir
*/
pf - > ctrl_vsi_idx = ICE_NO_VSI ;
set_bit ( ICE_FLAG_FD_ENA , pf - > flags ) ;
/* force guaranteed filter pool for PF */
ice_alloc_fd_guar_item ( & pf - > hw , & unused ,
func_caps - > fd_fltr_guar ) ;
/* force shared filter pool for PF */
ice_alloc_fd_shrd_item ( & pf - > hw , & unused ,
func_caps - > fd_fltr_best_effort ) ;
}
	clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
	if (func_caps->common_cap.ieee_1588)
		set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
pf - > max_pf_txqs = func_caps - > common_cap . num_txq ;
pf - > max_pf_rxqs = func_caps - > common_cap . num_rxq ;
}
/**
* ice_init_pf - Initialize general software structures ( struct ice_pf )
* @ pf : board private structure to initialize
*/
static int ice_init_pf ( struct ice_pf * pf )
{
ice_set_pf_caps ( pf ) ;
mutex_init ( & pf - > sw_mutex ) ;
mutex_init ( & pf - > tc_mutex ) ;
INIT_HLIST_HEAD ( & pf - > aq_wait_list ) ;
spin_lock_init ( & pf - > aq_wait_lock ) ;
init_waitqueue_head ( & pf - > aq_wait_queue ) ;
	init_waitqueue_head(&pf->reset_wait_queue);
/* setup service timer and periodic service task */
timer_setup ( & pf - > serv_tmr , ice_service_timer , 0 ) ;
pf - > serv_tmr_period = HZ ;
INIT_WORK ( & pf - > serv_task , ice_service_task ) ;
	clear_bit(ICE_SERVICE_SCHED, pf->state);
mutex_init ( & pf - > avail_q_mutex ) ;
pf - > avail_txqs = bitmap_zalloc ( pf - > max_pf_txqs , GFP_KERNEL ) ;
if ( ! pf - > avail_txqs )
return - ENOMEM ;
pf - > avail_rxqs = bitmap_zalloc ( pf - > max_pf_rxqs , GFP_KERNEL ) ;
if ( ! pf - > avail_rxqs ) {
devm_kfree ( ice_pf_to_dev ( pf ) , pf - > avail_txqs ) ;
pf - > avail_txqs = NULL ;
return - ENOMEM ;
}
return 0 ;
}
/**
* ice_ena_msix_range - Request a range of MSIX vectors from the OS
* @ pf : board private structure
*
* compute the number of MSIX vectors required ( v_budget ) and request from
* the OS . Return the number of vectors reserved or negative on failure
*/
static int ice_ena_msix_range ( struct ice_pf * pf )
{
	int num_cpus, v_left, v_actual, v_other, v_budget = 0;
struct device * dev = ice_pf_to_dev ( pf ) ;
int needed , err , i ;
v_left = pf - > hw . func_caps . common_cap . num_msix_vectors ;
	num_cpus = num_online_cpus();
/* reserve for LAN miscellaneous handler */
needed = ICE_MIN_LAN_OICR_MSIX ;
if ( v_left < needed )
goto no_hw_vecs_left_err ;
v_budget + = needed ;
v_left - = needed ;
/* reserve for flow director */
if ( test_bit ( ICE_FLAG_FD_ENA , pf - > flags ) ) {
needed = ICE_FDIR_MSIX ;
if ( v_left < needed )
goto no_hw_vecs_left_err ;
v_budget + = needed ;
v_left - = needed ;
}
/* total used for non-traffic vectors */
v_other = v_budget ;
/* reserve vectors for LAN traffic */
	needed = num_cpus;
if ( v_left < needed )
goto no_hw_vecs_left_err ;
pf - > num_lan_msix = needed ;
v_budget + = needed ;
v_left - = needed ;
	/* reserve vectors for RDMA auxiliary driver */
	if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
		needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
		if (v_left < needed)
			goto no_hw_vecs_left_err;
		pf->num_rdma_msix = needed;
		v_budget += needed;
		v_left -= needed;
	}
pf - > msix_entries = devm_kcalloc ( dev , v_budget ,
sizeof ( * pf - > msix_entries ) , GFP_KERNEL ) ;
if ( ! pf - > msix_entries ) {
err = - ENOMEM ;
goto exit_err ;
}
for ( i = 0 ; i < v_budget ; i + + )
pf - > msix_entries [ i ] . entry = i ;
/* actually reserve the vectors */
v_actual = pci_enable_msix_range ( pf - > pdev , pf - > msix_entries ,
ICE_MIN_MSIX , v_budget ) ;
if ( v_actual < 0 ) {
dev_err ( dev , " unable to reserve MSI-X vectors \n " ) ;
err = v_actual ;
goto msix_err ;
}
if ( v_actual < v_budget ) {
dev_warn ( dev , " not enough OS MSI-X vectors. requested = %d, obtained = %d \n " ,
v_budget , v_actual ) ;
if ( v_actual < ICE_MIN_MSIX ) {
/* error if we can't get minimum vectors */
pci_disable_msix ( pf - > pdev ) ;
err = - ERANGE ;
goto msix_err ;
} else {
			int v_remain = v_actual - v_other;
			int v_rdma = 0, v_min_rdma = 0;

			if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
				/* Need at least 1 interrupt in addition to
				 * AEQ MSIX
				 */
				v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
				v_min_rdma = ICE_MIN_RDMA_MSIX;
			}
			if (v_actual == ICE_MIN_MSIX ||
			    v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
				dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
				clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);

				pf->num_rdma_msix = 0;
				pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
			} else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
				   (v_remain - v_rdma < v_rdma)) {
				/* Support minimum RDMA and give remaining
				 * vectors to LAN MSIX
				 */
				pf->num_rdma_msix = v_min_rdma;
				pf->num_lan_msix = v_remain - v_min_rdma;
			} else {
				/* Split remaining MSIX with RDMA after
				 * accounting for AEQ MSIX
				 */
				pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
						    ICE_RDMA_NUM_AEQ_MSIX;
				pf->num_lan_msix = v_remain - pf->num_rdma_msix;
			}
			dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
				   pf->num_lan_msix);

			if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
				dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
					   pf->num_rdma_msix);
}
}
return v_actual ;
msix_err :
devm_kfree ( dev , pf - > msix_entries ) ;
goto exit_err ;
no_hw_vecs_left_err :
dev_err ( dev , " not enough device MSI-X vectors. requested = %d, available = %d \n " ,
needed , v_left ) ;
err = - ERANGE ;
exit_err :
	pf->num_rdma_msix = 0;
	pf->num_lan_msix = 0;
return err ;
}
/**
* ice_dis_msix - Disable MSI - X interrupt setup in OS
* @ pf : board private structure
*/
static void ice_dis_msix ( struct ice_pf * pf )
{
pci_disable_msix ( pf - > pdev ) ;
devm_kfree ( ice_pf_to_dev ( pf ) , pf - > msix_entries ) ;
pf - > msix_entries = NULL ;
}
/**
* ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
* @ pf : board private structure
*/
static void ice_clear_interrupt_scheme ( struct ice_pf * pf )
{
ice_dis_msix ( pf ) ;
if ( pf - > irq_tracker ) {
devm_kfree ( ice_pf_to_dev ( pf ) , pf - > irq_tracker ) ;
pf - > irq_tracker = NULL ;
}
}
/**
* ice_init_interrupt_scheme - Determine proper interrupt scheme
* @ pf : board private structure to initialize
*/
static int ice_init_interrupt_scheme ( struct ice_pf * pf )
{
int vectors ;
vectors = ice_ena_msix_range ( pf ) ;
if ( vectors < 0 )
return vectors ;
/* set up vector assignment tracking */
pf - > irq_tracker = devm_kzalloc ( ice_pf_to_dev ( pf ) ,
struct_size ( pf - > irq_tracker , list , vectors ) ,
GFP_KERNEL ) ;
if ( ! pf - > irq_tracker ) {
ice_dis_msix ( pf ) ;
return - ENOMEM ;
}
/* populate SW interrupts pool with number of OS granted IRQs. */
pf - > num_avail_sw_msix = ( u16 ) vectors ;
pf - > irq_tracker - > num_entries = ( u16 ) vectors ;
pf - > irq_tracker - > end = pf - > irq_tracker - > num_entries ;
return 0 ;
}
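/* Illustrative note (layout is an assumption; the real ice_res_tracker is
 * defined elsewhere and may differ): struct_size() above sizes a structure
 * with a trailing flexible array so the header and all entries come from a
 * single devm_kzalloc(), roughly:
 */
struct ice_example_tracker {
	u16 num_entries;
	u16 end;
	u16 list[];	/* struct_size(t, list, n) == sizeof(*t) + n * sizeof(t->list[0]) */
};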
/**
* ice_is_wol_supported - check if WoL is supported
* @ hw : pointer to hardware info
*
* Check if WoL is supported based on the HW configuration .
* Returns true if NVM supports and enables WoL for this port , false otherwise
*/
bool ice_is_wol_supported ( struct ice_hw * hw )
{
u16 wol_ctrl ;
/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
* word ) indicates WoL is not supported on the corresponding PF ID .
*/
if ( ice_read_sr_word ( hw , ICE_SR_NVM_WOL_CFG , & wol_ctrl ) )
return false ;
return ! ( BIT ( hw - > port_info - > lport ) & wol_ctrl ) ;
}
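/* Worked example (values are made up): with hw->port_info->lport == 2 and a
 * wol_ctrl word of 0x0004, BIT(2) is set in the NVM control word, so WoL is
 * reported as unsupported for this port and the function returns false.
 */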
/**
* ice_vsi_recfg_qs - Change the number of queues on a VSI
* @ vsi : VSI being changed
* @ new_rx : new number of Rx queues
* @ new_tx : new number of Tx queues
*
* Only change the number of queues if new_tx or new_rx is non-zero.
*
* Returns 0 on success .
*/
int ice_vsi_recfg_qs ( struct ice_vsi * vsi , int new_rx , int new_tx )
{
struct ice_pf * pf = vsi - > back ;
int err = 0 , timeout = 50 ;
if ( ! new_rx & & ! new_tx )
return - EINVAL ;
while ( test_and_set_bit ( ICE_CFG_BUSY , pf - > state ) ) {
timeout - - ;
if ( ! timeout )
return - EBUSY ;
usleep_range ( 1000 , 2000 ) ;
}
if ( new_tx )
vsi - > req_txq = ( u16 ) new_tx ;
if ( new_rx )
vsi - > req_rxq = ( u16 ) new_rx ;
/* set for the next time the netdev is started */
if ( ! netif_running ( vsi - > netdev ) ) {
ice_vsi_rebuild ( vsi , false ) ;
dev_dbg ( ice_pf_to_dev ( pf ) , " Link is down, queue count change happens when link is brought up \n " ) ;
goto done ;
}
ice_vsi_close ( vsi ) ;
ice_vsi_rebuild ( vsi , false ) ;
ice_pf_dcb_recfg ( pf ) ;
ice_vsi_open ( vsi ) ;
done :
clear_bit ( ICE_CFG_BUSY , pf - > state ) ;
return err ;
}
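/* Usage sketch (hypothetical caller, not from this file): an ethtool
 * ->set_channels() style path could pass the user's ring count straight
 * through. A zero for either argument leaves that direction unchanged, and
 * 0/0 is rejected with -EINVAL by the function above.
 */
static int __maybe_unused
ice_example_set_channels(struct ice_vsi *vsi, int combined)
{
	return ice_vsi_recfg_qs(vsi, combined, combined);
}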
/**
* ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
* @ pf : PF to configure
*
* No VLAN offloads / filtering are advertised in safe mode so make sure the PF
* VSI can still Tx / Rx VLAN tagged packets .
*/
static void ice_set_safe_mode_vlan_cfg ( struct ice_pf * pf )
{
struct ice_vsi * vsi = ice_get_main_vsi ( pf ) ;
struct ice_vsi_ctx * ctxt ;
enum ice_status status ;
struct ice_hw * hw ;
if ( ! vsi )
return ;
ctxt = kzalloc ( sizeof ( * ctxt ) , GFP_KERNEL ) ;
if ( ! ctxt )
return ;
hw = & pf - > hw ;
ctxt - > info = vsi - > info ;
ctxt - > info . valid_sections =
cpu_to_le16 ( ICE_AQ_VSI_PROP_VLAN_VALID |
ICE_AQ_VSI_PROP_SECURITY_VALID |
ICE_AQ_VSI_PROP_SW_VALID ) ;
/* disable VLAN anti-spoof */
ctxt - > info . sec_flags & = ~ ( ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA < <
ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S ) ;
/* disable VLAN pruning and keep all other settings */
ctxt - > info . sw_flags2 & = ~ ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA ;
/* allow all VLANs on Tx and don't strip on Rx */
ctxt - > info . vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
ICE_AQ_VSI_VLAN_EMOD_NOTHING ;
status = ice_update_vsi ( hw , vsi - > idx , ctxt , NULL ) ;
if ( status ) {
dev_err ( ice_pf_to_dev ( vsi - > back ) , " Failed to update VSI for safe mode VLANs, err %s aq_err %s \n " ,
ice_stat_str ( status ) ,
ice_aq_str ( hw - > adminq . sq_last_status ) ) ;
} else {
vsi - > info . sec_flags = ctxt - > info . sec_flags ;
vsi - > info . sw_flags2 = ctxt - > info . sw_flags2 ;
vsi - > info . vlan_flags = ctxt - > info . vlan_flags ;
}
kfree ( ctxt ) ;
}
/**
* ice_log_pkg_init - log result of DDP package load
* @ hw : pointer to hardware info
* @ status : status of package load
*/
static void
ice_log_pkg_init ( struct ice_hw * hw , enum ice_status * status )
{
struct ice_pf * pf = ( struct ice_pf * ) hw - > back ;
struct device * dev = ice_pf_to_dev ( pf ) ;
switch ( * status ) {
case ICE_SUCCESS :
/* The package download AdminQ command returned success because
* this download succeeded or ICE_ERR_AQ_NO_WORK since there is
* already a package loaded on the device .
*/
if ( hw - > pkg_ver . major = = hw - > active_pkg_ver . major & &
hw - > pkg_ver . minor = = hw - > active_pkg_ver . minor & &
hw - > pkg_ver . update = = hw - > active_pkg_ver . update & &
hw - > pkg_ver . draft = = hw - > active_pkg_ver . draft & &
! memcmp ( hw - > pkg_name , hw - > active_pkg_name ,
sizeof ( hw - > pkg_name ) ) ) {
if ( hw - > pkg_dwnld_status = = ICE_AQ_RC_EEXIST )
dev_info ( dev , " DDP package already present on device: %s version %d.%d.%d.%d \n " ,
hw - > active_pkg_name ,
hw - > active_pkg_ver . major ,
hw - > active_pkg_ver . minor ,
hw - > active_pkg_ver . update ,
hw - > active_pkg_ver . draft ) ;
else
dev_info ( dev , " The DDP package was successfully loaded: %s version %d.%d.%d.%d \n " ,
hw - > active_pkg_name ,
hw - > active_pkg_ver . major ,
hw - > active_pkg_ver . minor ,
hw - > active_pkg_ver . update ,
hw - > active_pkg_ver . draft ) ;
} else if ( hw - > active_pkg_ver . major ! = ICE_PKG_SUPP_VER_MAJ | |
hw - > active_pkg_ver . minor ! = ICE_PKG_SUPP_VER_MNR ) {
dev_err ( dev , " The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode. \n " ,
hw - > active_pkg_name ,
hw - > active_pkg_ver . major ,
hw - > active_pkg_ver . minor ,
ICE_PKG_SUPP_VER_MAJ , ICE_PKG_SUPP_VER_MNR ) ;
* status = ICE_ERR_NOT_SUPPORTED ;
} else if ( hw - > active_pkg_ver . major = = ICE_PKG_SUPP_VER_MAJ & &
hw - > active_pkg_ver . minor = = ICE_PKG_SUPP_VER_MNR ) {
dev_info ( dev , " The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d. \n " ,
hw - > active_pkg_name ,
hw - > active_pkg_ver . major ,
hw - > active_pkg_ver . minor ,
hw - > active_pkg_ver . update ,
hw - > active_pkg_ver . draft ,
hw - > pkg_name ,
hw - > pkg_ver . major ,
hw - > pkg_ver . minor ,
hw - > pkg_ver . update ,
hw - > pkg_ver . draft ) ;
} else {
dev_err ( dev , " An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode. \n " ) ;
* status = ICE_ERR_NOT_SUPPORTED ;
}
break ;
case ICE_ERR_FW_DDP_MISMATCH :
dev_err ( dev , " The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode. \n " ) ;
break ;
case ICE_ERR_BUF_TOO_SHORT :
case ICE_ERR_CFG :
dev_err ( dev , " The DDP package file is invalid. Entering Safe Mode. \n " ) ;
break ;
case ICE_ERR_NOT_SUPPORTED :
/* Package File version not supported */
if ( hw - > pkg_ver . major > ICE_PKG_SUPP_VER_MAJ | |
( hw - > pkg_ver . major = = ICE_PKG_SUPP_VER_MAJ & &
hw - > pkg_ver . minor > ICE_PKG_SUPP_VER_MNR ) )
dev_err ( dev , " The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode. \n " ) ;
else if ( hw - > pkg_ver . major < ICE_PKG_SUPP_VER_MAJ | |
( hw - > pkg_ver . major = = ICE_PKG_SUPP_VER_MAJ & &
hw - > pkg_ver . minor < ICE_PKG_SUPP_VER_MNR ) )
dev_err ( dev , " The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode. \n " ,
ICE_PKG_SUPP_VER_MAJ , ICE_PKG_SUPP_VER_MNR ) ;
break ;
case ICE_ERR_AQ_ERROR :
switch ( hw - > pkg_dwnld_status ) {
case ICE_AQ_RC_ENOSEC :
case ICE_AQ_RC_EBADSIG :
dev_err ( dev , " The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode. \n " ) ;
return ;
case ICE_AQ_RC_ESVN :
dev_err ( dev , " The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode. \n " ) ;
return ;
case ICE_AQ_RC_EBADMAN :
case ICE_AQ_RC_EBADBUF :
dev_err ( dev , " An error occurred on the device while loading the DDP package. The device will be reset. \n " ) ;
/* poll for reset to complete */
if ( ice_check_reset ( hw ) )
dev_err ( dev , " Error resetting device. Please reload the driver \n " ) ;
return ;
default :
break ;
}
fallthrough ;
default :
dev_err ( dev , " An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode. \n " ,
* status ) ;
break ;
}
}
/**
* ice_load_pkg - load / reload the DDP Package file
* @ firmware : firmware structure when firmware requested or NULL for reload
* @ pf : pointer to the PF instance
*
* Called on probe and post CORER / GLOBR rebuild to load DDP Package and
* initialize HW tables .
*/
static void
ice_load_pkg ( const struct firmware * firmware , struct ice_pf * pf )
{
enum ice_status status = ICE_ERR_PARAM ;
struct device * dev = ice_pf_to_dev ( pf ) ;
struct ice_hw * hw = & pf - > hw ;
/* Load DDP Package */
if ( firmware & & ! hw - > pkg_copy ) {
status = ice_copy_and_init_pkg ( hw , firmware - > data ,
firmware - > size ) ;
ice_log_pkg_init ( hw , & status ) ;
} else if ( ! firmware & & hw - > pkg_copy ) {
/* Reload package during rebuild after CORER/GLOBR reset */
status = ice_init_pkg ( hw , hw - > pkg_copy , hw - > pkg_size ) ;
ice_log_pkg_init ( hw , & status ) ;
} else {
dev_err ( dev , " The DDP package file failed to load. Entering Safe Mode. \n " ) ;
}
if ( status ) {
/* Safe Mode */
clear_bit ( ICE_FLAG_ADV_FEATURES , pf - > flags ) ;
return ;
}
/* A successful package download is the precondition for advanced
* features , hence setting the ICE_FLAG_ADV_FEATURES flag
*/
set_bit ( ICE_FLAG_ADV_FEATURES , pf - > flags ) ;
}
/**
* ice_verify_cacheline_size - verify driver ' s assumption of 64 Byte cache lines
* @ pf : pointer to the PF structure
*
* There is no error returned here because the driver should be able to handle
* 128 Byte cache lines , so we only print a warning in case issues are seen ,
* specifically with Tx .
*/
static void ice_verify_cacheline_size ( struct ice_pf * pf )
{
if ( rd32 ( & pf - > hw , GLPCI_CNF2 ) & GLPCI_CNF2_CACHELINE_SIZE_M )
dev_warn ( ice_pf_to_dev ( pf ) , " %d Byte cache line assumption is invalid, driver may have Tx timeouts! \n " ,
ICE_CACHE_LINE_BYTES ) ;
}
/**
* ice_send_version - update firmware with driver version
* @ pf : PF struct
*
* Returns ICE_SUCCESS on success , else error code
*/
static enum ice_status ice_send_version ( struct ice_pf * pf )
{
struct ice_driver_ver dv ;
dv . major_ver = 0xff ;
dv . minor_ver = 0xff ;
dv . build_ver = 0xff ;
dv . subbuild_ver = 0 ;
strscpy ( ( char * ) dv . driver_string , UTS_RELEASE ,
sizeof ( dv . driver_string ) ) ;
return ice_aq_send_driver_ver ( & pf - > hw , & dv , NULL ) ;
}
/**
* ice_init_fdir - Initialize flow director VSI and configuration
* @ pf : pointer to the PF instance
*
* returns 0 on success , negative on error
*/
static int ice_init_fdir ( struct ice_pf * pf )
{
struct device * dev = ice_pf_to_dev ( pf ) ;
struct ice_vsi * ctrl_vsi ;
int err ;
/* Side Band Flow Director needs to have a control VSI.
* Allocate it and store it in the PF .
*/
ctrl_vsi = ice_ctrl_vsi_setup ( pf , pf - > hw . port_info ) ;
if ( ! ctrl_vsi ) {
dev_dbg ( dev , " could not create control VSI \n " ) ;
return - ENOMEM ;
}
err = ice_vsi_open_ctrl ( ctrl_vsi ) ;
if ( err ) {
dev_dbg ( dev , " could not open control VSI \n " ) ;
goto err_vsi_open ;
}
mutex_init ( & pf - > hw . fdir_fltr_lock ) ;
err = ice_fdir_create_dflt_rules ( pf ) ;
if ( err )
goto err_fdir_rule ;
return 0 ;
err_fdir_rule :
ice_fdir_release_flows ( & pf - > hw ) ;
ice_vsi_close ( ctrl_vsi ) ;
err_vsi_open :
ice_vsi_release ( ctrl_vsi ) ;
if ( pf - > ctrl_vsi_idx ! = ICE_NO_VSI ) {
pf - > vsi [ pf - > ctrl_vsi_idx ] = NULL ;
pf - > ctrl_vsi_idx = ICE_NO_VSI ;
}
return err ;
}
/**
* ice_get_opt_fw_name - return optional firmware file name or NULL
* @ pf : pointer to the PF instance
*/
static char * ice_get_opt_fw_name ( struct ice_pf * pf )
{
/* Optional firmware name same as default with additional dash
* followed by an EUI-64 identifier (PCIe Device Serial Number)
*/
struct pci_dev * pdev = pf - > pdev ;
char * opt_fw_filename ;
u64 dsn ;
/* Determine the name of the optional file using the DSN (two
* dwords following the start of the DSN Capability ) .
*/
dsn = pci_get_dsn ( pdev ) ;
if ( ! dsn )
return NULL ;
opt_fw_filename = kzalloc ( NAME_MAX , GFP_KERNEL ) ;
if ( ! opt_fw_filename )
return NULL ;
snprintf ( opt_fw_filename , NAME_MAX , " %sice-%016llx.pkg " ,
ICE_DDP_PKG_PATH , dsn ) ;
return opt_fw_filename ;
}
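/* Worked example (DSN value is made up): a device whose DSN reads
 * 0x0123456789abcdef gets the optional package name
 * "intel/ice/ddp/ice-0123456789abcdef.pkg", which request_firmware() then
 * resolves under the firmware search paths (e.g. /lib/firmware/).
 */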
/**
* ice_request_fw - Device initialization routine
* @ pf : pointer to the PF instance
*/
static void ice_request_fw ( struct ice_pf * pf )
{
char * opt_fw_filename = ice_get_opt_fw_name ( pf ) ;
const struct firmware * firmware = NULL ;
struct device * dev = ice_pf_to_dev ( pf ) ;
int err = 0 ;
/* optional device-specific DDP (if present) overrides the default DDP
* package file . kernel logs a debug message if the file doesn ' t exist ,
* and warning messages for other errors .
*/
if ( opt_fw_filename ) {
err = firmware_request_nowarn ( & firmware , opt_fw_filename , dev ) ;
if ( err ) {
kfree ( opt_fw_filename ) ;
goto dflt_pkg_load ;
}
/* request for firmware was successful. Download to device */
ice_load_pkg ( firmware , pf ) ;
kfree ( opt_fw_filename ) ;
release_firmware ( firmware ) ;
return ;
}
dflt_pkg_load :
err = request_firmware ( & firmware , ICE_DDP_PKG_FILE , dev ) ;
if ( err ) {
dev_err ( dev , " The DDP package file was not found or could not be read. Entering Safe Mode \n " ) ;
return ;
}
/* request for firmware was successful. Download to device */
ice_load_pkg ( firmware , pf ) ;
release_firmware ( firmware ) ;
}
/**
* ice_print_wake_reason - show the wake up cause in the log
* @ pf : pointer to the PF struct
*/
static void ice_print_wake_reason ( struct ice_pf * pf )
{
u32 wus = pf - > wakeup_reason ;
const char * wake_str ;
/* if no wake event, nothing to print */
if ( ! wus )
return ;
if ( wus & PFPM_WUS_LNKC_M )
wake_str = " Link \n " ;
else if ( wus & PFPM_WUS_MAG_M )
wake_str = " Magic Packet \n " ;
else if ( wus & PFPM_WUS_MNG_M )
wake_str = " Management \n " ;
else if ( wus & PFPM_WUS_FW_RST_WK_M )
wake_str = " Firmware Reset \n " ;
else
wake_str = " Unknown \n " ;
dev_info ( ice_pf_to_dev ( pf ) , " Wake reason: %s " , wake_str ) ;
}
/**
* ice_register_netdev - register netdev and devlink port
* @ pf : pointer to the PF struct
*/
static int ice_register_netdev ( struct ice_pf * pf )
{
struct ice_vsi * vsi ;
int err = 0 ;
vsi = ice_get_main_vsi ( pf ) ;
if ( ! vsi | | ! vsi - > netdev )
return - EIO ;
err = register_netdev ( vsi - > netdev ) ;
if ( err )
goto err_register_netdev ;
set_bit ( ICE_VSI_NETDEV_REGISTERED , vsi - > state ) ;
netif_carrier_off ( vsi - > netdev ) ;
netif_tx_stop_all_queues ( vsi - > netdev ) ;
err = ice_devlink_create_port ( vsi ) ;
if ( err )
goto err_devlink_create ;
devlink_port_type_eth_set ( & vsi - > devlink_port , vsi - > netdev ) ;
return 0 ;
err_devlink_create :
unregister_netdev ( vsi - > netdev ) ;
clear_bit ( ICE_VSI_NETDEV_REGISTERED , vsi - > state ) ;
err_register_netdev :
free_netdev ( vsi - > netdev ) ;
vsi - > netdev = NULL ;
clear_bit ( ICE_VSI_NETDEV_ALLOCD , vsi - > state ) ;
return err ;
}
/**
* ice_probe - Device initialization routine
* @ pdev : PCI device information struct
* @ ent : entry in ice_pci_tbl
*
* Returns 0 on success , negative on failure
*/
static int
ice_probe ( struct pci_dev * pdev , const struct pci_device_id __always_unused * ent )
{
struct device * dev = & pdev - > dev ;
struct ice_pf * pf ;
struct ice_hw * hw ;
int i , err ;
/* this driver uses devres, see
* Documentation / driver - api / driver - model / devres . rst
*/
err = pcim_enable_device ( pdev ) ;
if ( err )
return err ;
err = pcim_iomap_regions ( pdev , BIT ( ICE_BAR0 ) , dev_driver_string ( dev ) ) ;
if ( err ) {
dev_err ( dev , " BAR0 I/O map error %d \n " , err ) ;
return err ;
}
pf = ice_allocate_pf ( dev ) ;
if ( ! pf )
return - ENOMEM ;
/* set up for high or low DMA */
err = dma_set_mask_and_coherent ( dev , DMA_BIT_MASK ( 64 ) ) ;
if ( err )
err = dma_set_mask_and_coherent ( dev , DMA_BIT_MASK ( 32 ) ) ;
if ( err ) {
dev_err ( dev , " DMA configuration failed: 0x%x \n " , err ) ;
return err ;
}
pci_enable_pcie_error_reporting ( pdev ) ;
pci_set_master ( pdev ) ;
pf - > pdev = pdev ;
pci_set_drvdata ( pdev , pf ) ;
set_bit ( ICE_DOWN , pf - > state ) ;
/* Disable service task until DOWN bit is cleared */
set_bit ( ICE_SERVICE_DIS , pf - > state ) ;
hw = & pf - > hw ;
hw - > hw_addr = pcim_iomap_table ( pdev ) [ ICE_BAR0 ] ;
pci_save_state ( pdev ) ;
hw - > back = pf ;
hw - > vendor_id = pdev - > vendor ;
hw - > device_id = pdev - > device ;
pci_read_config_byte ( pdev , PCI_REVISION_ID , & hw - > revision_id ) ;
hw - > subsystem_vendor_id = pdev - > subsystem_vendor ;
hw - > subsystem_device_id = pdev - > subsystem_device ;
hw - > bus . device = PCI_SLOT ( pdev - > devfn ) ;
hw - > bus . func = PCI_FUNC ( pdev - > devfn ) ;
ice_set_ctrlq_len ( hw ) ;
pf - > msg_enable = netif_msg_init ( debug , ICE_DFLT_NETIF_M ) ;
err = ice_devlink_register ( pf ) ;
if ( err ) {
dev_err ( dev , " ice_devlink_register failed: %d \n " , err ) ;
goto err_exit_unroll ;
}
# ifndef CONFIG_DYNAMIC_DEBUG
if ( debug < - 1 )
hw - > debug_mask = debug ;
# endif
err = ice_init_hw ( hw ) ;
if ( err ) {
dev_err ( dev , " ice_init_hw failed: %d \n " , err ) ;
err = - EIO ;
goto err_exit_unroll ;
}
ice_request_fw ( pf ) ;
/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
* set in pf->flags, which will cause ice_is_safe_mode to return
* true
*/
if ( ice_is_safe_mode ( pf ) ) {
dev_err ( dev , " Package download failed. Advanced features disabled - Device now in Safe Mode \n " ) ;
/* we already got function/device capabilities but these don't
* reflect what the driver needs to do in safe mode . Instead of
* adding conditional logic everywhere to ignore these
* device / function capabilities , override them .
*/
ice_set_safe_mode_caps ( hw ) ;
}
err = ice_init_pf ( pf ) ;
if ( err ) {
dev_err ( dev , " ice_init_pf failed: %d \n " , err ) ;
goto err_init_pf_unroll ;
}
ice_devlink_init_regions ( pf ) ;
pf - > hw . udp_tunnel_nic . set_port = ice_udp_tunnel_set_port ;
pf - > hw . udp_tunnel_nic . unset_port = ice_udp_tunnel_unset_port ;
pf - > hw . udp_tunnel_nic . flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP ;
pf - > hw . udp_tunnel_nic . shared = & pf - > hw . udp_tunnel_shared ;
i = 0 ;
if ( pf - > hw . tnl . valid_count [ TNL_VXLAN ] ) {
pf - > hw . udp_tunnel_nic . tables [ i ] . n_entries =
pf - > hw . tnl . valid_count [ TNL_VXLAN ] ;
pf - > hw . udp_tunnel_nic . tables [ i ] . tunnel_types =
UDP_TUNNEL_TYPE_VXLAN ;
i + + ;
}
if ( pf - > hw . tnl . valid_count [ TNL_GENEVE ] ) {
pf - > hw . udp_tunnel_nic . tables [ i ] . n_entries =
pf - > hw . tnl . valid_count [ TNL_GENEVE ] ;
pf - > hw . udp_tunnel_nic . tables [ i ] . tunnel_types =
UDP_TUNNEL_TYPE_GENEVE ;
i + + ;
}
pf - > num_alloc_vsi = hw - > func_caps . guar_num_vsi ;
if ( ! pf - > num_alloc_vsi ) {
err = - EIO ;
goto err_init_pf_unroll ;
}
if ( pf - > num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES ) {
dev_warn ( & pf - > pdev - > dev ,
" limiting the VSI count due to UDP tunnel limitation %d > %d \n " ,
pf - > num_alloc_vsi , UDP_TUNNEL_NIC_MAX_SHARING_DEVICES ) ;
pf - > num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES ;
}
pf - > vsi = devm_kcalloc ( dev , pf - > num_alloc_vsi , sizeof ( * pf - > vsi ) ,
GFP_KERNEL ) ;
if ( ! pf - > vsi ) {
err = - ENOMEM ;
goto err_init_pf_unroll ;
}
err = ice_init_interrupt_scheme ( pf ) ;
if ( err ) {
dev_err ( dev , " ice_init_interrupt_scheme failed: %d \n " , err ) ;
err = - EIO ;
goto err_init_vsi_unroll ;
}
/* In case of MSIX we are going to setup the misc vector right here
* to handle admin queue events etc . In case of legacy and MSI
* the misc functionality and queue processing is combined in
* the same vector and that gets setup at open .
*/
err = ice_req_irq_msix_misc ( pf ) ;
if ( err ) {
dev_err ( dev , " setup of misc vector failed: %d \n " , err ) ;
goto err_init_interrupt_unroll ;
}
/* create switch struct for the switch element created by FW on boot */
pf - > first_sw = devm_kzalloc ( dev , sizeof ( * pf - > first_sw ) , GFP_KERNEL ) ;
if ( ! pf - > first_sw ) {
err = - ENOMEM ;
goto err_msix_misc_unroll ;
}
if ( hw - > evb_veb )
pf - > first_sw - > bridge_mode = BRIDGE_MODE_VEB ;
else
pf - > first_sw - > bridge_mode = BRIDGE_MODE_VEPA ;
pf - > first_sw - > pf = pf ;
/* record the sw_id available for later use */
pf - > first_sw - > sw_id = hw - > port_info - > sw_id ;
err = ice_setup_pf_sw ( pf ) ;
if ( err ) {
dev_err ( dev , " probe failed due to setup PF switch: %d \n " , err ) ;
goto err_alloc_sw_unroll ;
}
clear_bit ( ICE_SERVICE_DIS , pf - > state ) ;
/* tell the firmware we are up */
err = ice_send_version ( pf ) ;
if ( err ) {
dev_err ( dev , " probe failed sending driver version %s. error: %d \n " ,
UTS_RELEASE , err ) ;
goto err_send_version_unroll ;
}
/* since everything is good, start the service timer */
mod_timer ( & pf - > serv_tmr , round_jiffies ( jiffies + pf - > serv_tmr_period ) ) ;
err = ice_init_link_events ( pf - > hw . port_info ) ;
if ( err ) {
dev_err ( dev , " ice_init_link_events failed: %d \n " , err ) ;
goto err_send_version_unroll ;
}
/* not a fatal error if this fails */
err = ice_init_nvm_phy_type ( pf - > hw . port_info ) ;
if ( err )
dev_err ( dev , " ice_init_nvm_phy_type failed: %d \n " , err ) ;
/* not a fatal error if this fails */
err = ice_update_link_info ( pf - > hw . port_info ) ;
if ( err )
dev_err ( dev , " ice_update_link_info failed: %d \n " , err ) ;
ice_init_link_dflt_override ( pf - > hw . port_info ) ;
ice_check_module_power ( pf , pf - > hw . port_info - > phy . link_info . link_cfg_err ) ;
/* if media available, initialize PHY settings */
if ( pf - > hw . port_info - > phy . link_info . link_info &
ICE_AQ_MEDIA_AVAILABLE ) {
/* not a fatal error if this fails */
err = ice_init_phy_user_cfg ( pf - > hw . port_info ) ;
if ( err )
dev_err ( dev , " ice_init_phy_user_cfg failed: %d \n " , err ) ;
if ( ! test_bit ( ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA , pf - > flags ) ) {
struct ice_vsi * vsi = ice_get_main_vsi ( pf ) ;
if ( vsi )
ice_configure_phy ( vsi ) ;
}
} else {
set_bit ( ICE_FLAG_NO_MEDIA , pf - > flags ) ;
}
ice_verify_cacheline_size ( pf ) ;
/* Save wakeup reason register for later use */
pf - > wakeup_reason = rd32 ( hw , PFPM_WUS ) ;
/* check for a power management event */
ice_print_wake_reason ( pf ) ;
/* clear wake status, all bits */
wr32 ( hw , PFPM_WUS , U32_MAX ) ;
/* Disable WoL at init, wait for user to enable */
device_set_wakeup_enable ( dev , false ) ;
if ( ice_is_safe_mode ( pf ) ) {
ice_set_safe_mode_vlan_cfg ( pf ) ;
goto probe_done ;
}
/* initialize DDP driven features */
if ( test_bit ( ICE_FLAG_PTP_SUPPORTED , pf - > flags ) )
ice_ptp_init ( pf ) ;
/* Note: Flow director init failure is non-fatal to load */
if ( ice_init_fdir ( pf ) )
dev_err ( dev , " could not initialize flow director \n " ) ;
/* Note: DCB init failure is non-fatal to load */
if ( ice_init_pf_dcb ( pf , false ) ) {
clear_bit ( ICE_FLAG_DCB_CAPABLE , pf - > flags ) ;
clear_bit ( ICE_FLAG_DCB_ENA , pf - > flags ) ;
} else {
ice_cfg_lldp_mib_change ( & pf - > hw , true ) ;
}
if ( ice_init_lag ( pf ) )
dev_warn ( dev , " Failed to init link aggregation support \n " ) ;
/* print PCI link speed and width */
pcie_print_link_status ( pf - > pdev ) ;
probe_done :
err = ice_register_netdev ( pf ) ;
if ( err )
goto err_netdev_reg ;
/* ready to go, so clear down state bit */
clear_bit ( ICE_DOWN , pf - > state ) ;
if ( ice_is_aux_ena ( pf ) ) {
pf - > aux_idx = ida_alloc ( & ice_aux_ida , GFP_KERNEL ) ;
if ( pf - > aux_idx < 0 ) {
dev_err ( dev , " Failed to allocate device ID for AUX driver \n " ) ;
err = - ENOMEM ;
goto err_netdev_reg ;
}
err = ice_init_rdma ( pf ) ;
if ( err ) {
dev_err ( dev , " Failed to initialize RDMA: %d \n " , err ) ;
err = - EIO ;
goto err_init_aux_unroll ;
}
} else {
dev_warn ( dev , " RDMA is not supported on this device \n " ) ;
}
return 0 ;
err_init_aux_unroll :
pf - > adev = NULL ;
ida_free ( & ice_aux_ida , pf - > aux_idx ) ;
err_netdev_reg :
err_send_version_unroll :
ice_vsi_release_all ( pf ) ;
err_alloc_sw_unroll :
set_bit ( ICE_SERVICE_DIS , pf - > state ) ;
set_bit ( ICE_DOWN , pf - > state ) ;
devm_kfree ( dev , pf - > first_sw ) ;
err_msix_misc_unroll :
ice_free_irq_msix_misc ( pf ) ;
err_init_interrupt_unroll :
ice_clear_interrupt_scheme ( pf ) ;
err_init_vsi_unroll :
devm_kfree ( dev , pf - > vsi ) ;
err_init_pf_unroll :
ice_deinit_pf ( pf ) ;
ice_devlink_destroy_regions ( pf ) ;
ice_deinit_hw ( hw ) ;
err_exit_unroll :
ice_devlink_unregister ( pf ) ;
pci_disable_pcie_error_reporting ( pdev ) ;
pci_disable_device ( pdev ) ;
return err ;
}
/**
* ice_set_wake - enable or disable Wake on LAN
* @ pf : pointer to the PF struct
*
* Simple helper for WoL control
*/
static void ice_set_wake ( struct ice_pf * pf )
{
struct ice_hw * hw = & pf - > hw ;
bool wol = pf - > wol_ena ;
/* clear wake state, otherwise new wake events won't fire */
wr32 ( hw , PFPM_WUS , U32_MAX ) ;
/* enable / disable APM wake up, no RMW needed */
wr32 ( hw , PFPM_APM , wol ? PFPM_APM_APME_M : 0 ) ;
/* set magic packet filter enabled */
wr32 ( hw , PFPM_WUFC , wol ? PFPM_WUFC_MAG_M : 0 ) ;
}
/**
* ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
* @ pf : pointer to the PF struct
*
* Issue firmware command to enable multicast magic wake , making
* sure that any locally administered address ( LAA ) is used for
* wake , and that PF reset doesn ' t undo the LAA .
*/
static void ice_setup_mc_magic_wake ( struct ice_pf * pf )
{
struct device * dev = ice_pf_to_dev ( pf ) ;
struct ice_hw * hw = & pf - > hw ;
enum ice_status status ;
u8 mac_addr [ ETH_ALEN ] ;
struct ice_vsi * vsi ;
u8 flags ;
if ( ! pf - > wol_ena )
return ;
vsi = ice_get_main_vsi ( pf ) ;
if ( ! vsi )
return ;
/* Get current MAC address in case it's an LAA */
if ( vsi - > netdev )
ether_addr_copy ( mac_addr , vsi - > netdev - > dev_addr ) ;
else
ether_addr_copy ( mac_addr , vsi - > port_info - > mac . perm_addr ) ;
flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP ;
status = ice_aq_manage_mac_write ( hw , mac_addr , flags , NULL ) ;
if ( status )
dev_err ( dev , " Failed to enable Multicast Magic Packet wake, err %s aq_err %s \n " ,
ice_stat_str ( status ) ,
ice_aq_str ( hw - > adminq . sq_last_status ) ) ;
}
/**
* ice_remove - Device removal routine
* @ pdev : PCI device information struct
*/
static void ice_remove ( struct pci_dev * pdev )
{
struct ice_pf * pf = pci_get_drvdata ( pdev ) ;
int i ;
if ( ! pf )
return ;
for ( i = 0 ; i < ICE_MAX_RESET_WAIT ; i + + ) {
if ( ! ice_is_reset_in_progress ( pf - > state ) )
break ;
msleep ( 100 ) ;
}
if ( test_bit ( ICE_FLAG_SRIOV_ENA , pf - > flags ) ) {
set_bit ( ICE_VF_RESETS_DISABLED , pf - > state ) ;
ice_free_vfs ( pf ) ;
}
ice_service_task_stop ( pf ) ;
ice_aq_cancel_waiting_tasks ( pf ) ;
ice_unplug_aux_dev ( pf ) ;
ida_free ( & ice_aux_ida , pf - > aux_idx ) ;
set_bit ( ICE_DOWN , pf - > state ) ;
mutex_destroy ( & ( & pf - > hw ) - > fdir_fltr_lock ) ;
ice_deinit_lag ( pf ) ;
if ( test_bit ( ICE_FLAG_PTP_SUPPORTED , pf - > flags ) )
ice_ptp_release ( pf ) ;
if ( ! ice_is_safe_mode ( pf ) )
ice_remove_arfs ( pf ) ;
ice_setup_mc_magic_wake ( pf ) ;
ice_vsi_release_all ( pf ) ;
ice_set_wake ( pf ) ;
ice_free_irq_msix_misc ( pf ) ;
ice_for_each_vsi ( pf , i ) {
if ( ! pf - > vsi [ i ] )
continue ;
ice_vsi_free_q_vectors ( pf - > vsi [ i ] ) ;
}
ice_deinit_pf ( pf ) ;
ice_devlink_destroy_regions ( pf ) ;
ice_deinit_hw ( & pf - > hw ) ;
ice_devlink_unregister ( pf ) ;
/* Issue a PFR as part of the prescribed driver unload flow. Do not
* do it via ice_schedule_reset ( ) since there is no need to rebuild
* and the service task is already stopped .
*/
ice_reset ( & pf - > hw , ICE_RESET_PFR ) ;
pci_wait_for_pending_transaction ( pdev ) ;
ice_clear_interrupt_scheme ( pf ) ;
pci_disable_pcie_error_reporting ( pdev ) ;
pci_disable_device ( pdev ) ;
}
/**
* ice_shutdown - PCI callback for shutting down device
* @ pdev : PCI device information struct
*/
static void ice_shutdown ( struct pci_dev * pdev )
{
struct ice_pf * pf = pci_get_drvdata ( pdev ) ;
ice_remove ( pdev ) ;
if ( system_state = = SYSTEM_POWER_OFF ) {
pci_wake_from_d3 ( pdev , pf - > wol_ena ) ;
pci_set_power_state ( pdev , PCI_D3hot ) ;
}
}
# ifdef CONFIG_PM
/**
* ice_prepare_for_shutdown - prep for PCI shutdown
* @ pf : board private structure
*
* Inform or close all dependent features in prep for PCI device shutdown
*/
static void ice_prepare_for_shutdown ( struct ice_pf * pf )
{
struct ice_hw * hw = & pf - > hw ;
u32 v ;
/* Notify VFs of impending reset */
if ( ice_check_sq_alive ( hw , & hw - > mailboxq ) )
ice_vc_notify_reset ( pf ) ;
dev_dbg ( ice_pf_to_dev ( pf ) , " Tearing down internal switch for shutdown \n " ) ;
/* disable the VSIs and their queues that are not already DOWN */
ice_pf_dis_all_vsi ( pf , false ) ;
ice_for_each_vsi ( pf , v )
if ( pf - > vsi [ v ] )
pf - > vsi [ v ] - > vsi_num = 0 ;
ice_shutdown_all_ctrlq ( hw ) ;
}
/**
* ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
* @ pf : board private structure to reinitialize
*
* This routine reinitializes the interrupt scheme that was cleared during
* power management suspend callback .
*
* This should be called during resume routine to re - allocate the q_vectors
* and reacquire interrupts .
*/
static int ice_reinit_interrupt_scheme ( struct ice_pf * pf )
{
struct device * dev = ice_pf_to_dev ( pf ) ;
int ret , v ;
/* Since we clear MSIX flag during suspend, we need to
* set it back during resume . . .
*/
ret = ice_init_interrupt_scheme ( pf ) ;
if ( ret ) {
dev_err ( dev , " Failed to re-initialize interrupt %d \n " , ret ) ;
return ret ;
}
/* Remap vectors and rings, after successful re-init interrupts */
ice_for_each_vsi ( pf , v ) {
if ( ! pf - > vsi [ v ] )
continue ;
ret = ice_vsi_alloc_q_vectors ( pf - > vsi [ v ] ) ;
if ( ret )
goto err_reinit ;
ice_vsi_map_rings_to_vectors ( pf - > vsi [ v ] ) ;
}
ret = ice_req_irq_msix_misc ( pf ) ;
if ( ret ) {
dev_err ( dev , " Setting up misc vector failed after device suspend %d \n " ,
ret ) ;
goto err_reinit ;
}
return 0 ;
err_reinit :
while ( v - - )
if ( pf - > vsi [ v ] )
ice_vsi_free_q_vectors ( pf - > vsi [ v ] ) ;
return ret ;
}
/**
* ice_suspend
* @ dev : generic device information structure
*
* Power Management callback to quiesce the device and prepare
* for D3 transition .
*/
static int __maybe_unused ice_suspend ( struct device * dev )
{
struct pci_dev * pdev = to_pci_dev ( dev ) ;
struct ice_pf * pf ;
int disabled , v ;
pf = pci_get_drvdata ( pdev ) ;
if ( ! ice_pf_state_is_nominal ( pf ) ) {
dev_err ( dev , " Device is not ready, no need to suspend it \n " ) ;
return - EBUSY ;
}
/* Stop watchdog tasks until resume completion.
* Even though it is most likely that the service task is
* disabled if the device is suspended or down , the service task ' s
* state is controlled by a different state bit , and we should
* store and honor whatever state that bit is in at this point .
*/
disabled = ice_service_task_stop ( pf ) ;
ice_unplug_aux_dev ( pf ) ;
/* Already suspended? Then there is nothing to do */
if ( test_and_set_bit ( ICE_SUSPENDED , pf - > state ) ) {
if ( ! disabled )
ice_service_task_restart ( pf ) ;
return 0 ;
}
if ( test_bit ( ICE_DOWN , pf - > state ) | |
ice_is_reset_in_progress ( pf - > state ) ) {
dev_err ( dev , " can't suspend device in reset or already down \n " ) ;
if ( ! disabled )
ice_service_task_restart ( pf ) ;
return 0 ;
}
ice_setup_mc_magic_wake ( pf ) ;
ice_prepare_for_shutdown ( pf ) ;
ice_set_wake ( pf ) ;
/* Free vectors, clear the interrupt scheme and release IRQs
* for proper hibernation , especially with large number of CPUs .
* Otherwise hibernation might fail when mapping all the vectors back
* to CPU0 .
*/
ice_free_irq_msix_misc ( pf ) ;
ice_for_each_vsi ( pf , v ) {
if ( ! pf - > vsi [ v ] )
continue ;
ice_vsi_free_q_vectors ( pf - > vsi [ v ] ) ;
}
ice_free_cpu_rx_rmap ( ice_get_main_vsi ( pf ) ) ;
ice_clear_interrupt_scheme ( pf ) ;
pci_save_state ( pdev ) ;
pci_wake_from_d3 ( pdev , pf - > wol_ena ) ;
pci_set_power_state ( pdev , PCI_D3hot ) ;
return 0 ;
}
/**
* ice_resume - PM callback for waking up from D3
* @ dev : generic device information structure
*/
static int __maybe_unused ice_resume ( struct device * dev )
{
struct pci_dev * pdev = to_pci_dev ( dev ) ;
enum ice_reset_req reset_type ;
struct ice_pf * pf ;
struct ice_hw * hw ;
int ret ;
pci_set_power_state ( pdev , PCI_D0 ) ;
pci_restore_state ( pdev ) ;
pci_save_state ( pdev ) ;
if ( ! pci_device_is_present ( pdev ) )
return - ENODEV ;
ret = pci_enable_device_mem ( pdev ) ;
if ( ret ) {
dev_err ( dev , " Cannot enable device after suspend \n " ) ;
return ret ;
}
pf = pci_get_drvdata ( pdev ) ;
hw = & pf - > hw ;
pf - > wakeup_reason = rd32 ( hw , PFPM_WUS ) ;
ice_print_wake_reason ( pf ) ;
/* We cleared the interrupt scheme when we suspended, so we need to
* restore it now to resume device functionality .
*/
ret = ice_reinit_interrupt_scheme ( pf ) ;
if ( ret )
dev_err ( dev , " Cannot restore interrupt scheme: %d \n " , ret ) ;
clear_bit ( ICE_DOWN , pf - > state ) ;
/* Now perform PF reset and rebuild */
reset_type = ICE_RESET_PFR ;
/* re-enable service task for reset, but allow reset to schedule it */
clear_bit ( ICE_SERVICE_DIS , pf - > state ) ;
if ( ice_schedule_reset ( pf , reset_type ) )
dev_err ( dev , " Reset during resume failed. \n " ) ;
clear_bit ( ICE_SUSPENDED , pf - > state ) ;
ice_service_task_restart ( pf ) ;
/* Restart the service task */
mod_timer ( & pf - > serv_tmr , round_jiffies ( jiffies + pf - > serv_tmr_period ) ) ;
return 0 ;
}
# endif /* CONFIG_PM */
/**
* ice_pci_err_detected - warning that PCI error has been detected
* @ pdev : PCI device information struct
* @ err : the type of PCI error
*
* Called to warn that something happened on the PCI bus and the error handling
* is in progress . Allows the driver to gracefully prepare / handle PCI errors .
*/
static pci_ers_result_t
ice_pci_err_detected ( struct pci_dev * pdev , pci_channel_state_t err )
{
struct ice_pf * pf = pci_get_drvdata ( pdev ) ;
if ( ! pf ) {
dev_err ( & pdev - > dev , " %s: unrecoverable device error %d \n " ,
__func__ , err ) ;
return PCI_ERS_RESULT_DISCONNECT ;
}
if ( ! test_bit ( ICE_SUSPENDED , pf - > state ) ) {
ice_service_task_stop ( pf ) ;
if ( ! test_bit ( ICE_PREPARED_FOR_RESET , pf - > state ) ) {
set_bit ( ICE_PFR_REQ , pf - > state ) ;
ice_prepare_for_reset ( pf ) ;
}
}
return PCI_ERS_RESULT_NEED_RESET ;
}
/**
* ice_pci_err_slot_reset - a PCI slot reset has just happened
* @ pdev : PCI device information struct
*
* Called to determine if the driver can recover from the PCI slot reset by
* using a register read to determine if the device is recoverable .
*/
static pci_ers_result_t ice_pci_err_slot_reset ( struct pci_dev * pdev )
{
struct ice_pf * pf = pci_get_drvdata ( pdev ) ;
pci_ers_result_t result ;
int err ;
u32 reg ;
err = pci_enable_device_mem ( pdev ) ;
if ( err ) {
dev_err ( & pdev - > dev , " Cannot re-enable PCI device after reset, error %d \n " ,
err ) ;
result = PCI_ERS_RESULT_DISCONNECT ;
} else {
pci_set_master ( pdev ) ;
pci_restore_state ( pdev ) ;
pci_save_state ( pdev ) ;
pci_wake_from_d3 ( pdev , false ) ;
/* Check for life */
reg = rd32 ( & pf - > hw , GLGEN_RTRIG ) ;
if ( ! reg )
result = PCI_ERS_RESULT_RECOVERED ;
else
result = PCI_ERS_RESULT_DISCONNECT ;
}
err = pci_aer_clear_nonfatal_status ( pdev ) ;
if ( err )
dev_dbg ( & pdev - > dev , " pci_aer_clear_nonfatal_status() failed, error %d \n " ,
err ) ;
/* non-fatal, continue */
return result ;
}
/**
* ice_pci_err_resume - restart operations after PCI error recovery
* @ pdev : PCI device information struct
*
* Called to allow the driver to bring things back up after PCI error and / or
* reset recovery have finished
*/
static void ice_pci_err_resume ( struct pci_dev * pdev )
{
struct ice_pf * pf = pci_get_drvdata ( pdev ) ;
if ( ! pf ) {
dev_err ( & pdev - > dev , " %s failed, device is unrecoverable \n " ,
__func__ ) ;
return ;
}
if ( test_bit ( ICE_SUSPENDED , pf - > state ) ) {
dev_dbg ( & pdev - > dev , " %s failed to resume normal operations! \n " ,
__func__ ) ;
return ;
}
ice_restore_all_vfs_msi_state ( pdev ) ;
ice_do_reset ( pf , ICE_RESET_PFR ) ;
ice_service_task_restart ( pf ) ;
mod_timer ( & pf - > serv_tmr , round_jiffies ( jiffies + pf - > serv_tmr_period ) ) ;
}
/**
* ice_pci_err_reset_prepare - prepare device driver for PCI reset
* @ pdev : PCI device information struct
*/
static void ice_pci_err_reset_prepare ( struct pci_dev * pdev )
{
struct ice_pf * pf = pci_get_drvdata ( pdev ) ;
if ( ! test_bit ( ICE_SUSPENDED , pf - > state ) ) {
ice_service_task_stop ( pf ) ;
if ( ! test_bit ( ICE_PREPARED_FOR_RESET , pf - > state ) ) {
set_bit ( ICE_PFR_REQ , pf - > state ) ;
ice_prepare_for_reset ( pf ) ;
}
}
}
/**
* ice_pci_err_reset_done - PCI reset done , device driver reset can begin
* @ pdev : PCI device information struct
*/
static void ice_pci_err_reset_done ( struct pci_dev * pdev )
{
ice_pci_err_resume ( pdev ) ;
}
/* ice_pci_tbl - PCI Device ID Table
*
* Wildcard entries ( PCI_ANY_ID ) should come last
* Last entry must be all 0 s
*
* { Vendor ID , Device ID , SubVendor ID , SubDevice ID ,
* Class , Class Mask , private data ( not used ) }
*/
static const struct pci_device_id ice_pci_tbl [ ] = {
{ PCI_VDEVICE ( INTEL , ICE_DEV_ID_E810C_BACKPLANE ) , 0 } ,
{ PCI_VDEVICE ( INTEL , ICE_DEV_ID_E810C_QSFP ) , 0 } ,
{ PCI_VDEVICE ( INTEL , ICE_DEV_ID_E810C_SFP ) , 0 } ,
{ PCI_VDEVICE ( INTEL , ICE_DEV_ID_E810_XXV_SFP ) , 0 } ,
{ PCI_VDEVICE ( INTEL , ICE_DEV_ID_E823C_BACKPLANE ) , 0 } ,
{ PCI_VDEVICE ( INTEL , ICE_DEV_ID_E823C_QSFP ) , 0 } ,
{ PCI_VDEVICE ( INTEL , ICE_DEV_ID_E823C_SFP ) , 0 } ,
{ PCI_VDEVICE ( INTEL , ICE_DEV_ID_E823C_10G_BASE_T ) , 0 } ,
{ PCI_VDEVICE ( INTEL , ICE_DEV_ID_E823C_SGMII ) , 0 } ,
{ PCI_VDEVICE ( INTEL , ICE_DEV_ID_E822C_BACKPLANE ) , 0 } ,
{ PCI_VDEVICE ( INTEL , ICE_DEV_ID_E822C_QSFP ) , 0 } ,
{ PCI_VDEVICE ( INTEL , ICE_DEV_ID_E822C_SFP ) , 0 } ,
{ PCI_VDEVICE ( INTEL , ICE_DEV_ID_E822C_10G_BASE_T ) , 0 } ,
{ PCI_VDEVICE ( INTEL , ICE_DEV_ID_E822C_SGMII ) , 0 } ,
{ PCI_VDEVICE ( INTEL , ICE_DEV_ID_E822L_BACKPLANE ) , 0 } ,
{ PCI_VDEVICE ( INTEL , ICE_DEV_ID_E822L_SFP ) , 0 } ,
{ PCI_VDEVICE ( INTEL , ICE_DEV_ID_E822L_10G_BASE_T ) , 0 } ,
{ PCI_VDEVICE ( INTEL , ICE_DEV_ID_E822L_SGMII ) , 0 } ,
{ PCI_VDEVICE ( INTEL , ICE_DEV_ID_E823L_BACKPLANE ) , 0 } ,
{ PCI_VDEVICE ( INTEL , ICE_DEV_ID_E823L_SFP ) , 0 } ,
{ PCI_VDEVICE ( INTEL , ICE_DEV_ID_E823L_10G_BASE_T ) , 0 } ,
{ PCI_VDEVICE ( INTEL , ICE_DEV_ID_E823L_1GBE ) , 0 } ,
{ PCI_VDEVICE ( INTEL , ICE_DEV_ID_E823L_QSFP ) , 0 } ,
/* required last entry */
{ 0 , }
} ;
MODULE_DEVICE_TABLE ( pci , ice_pci_tbl ) ;
static __maybe_unused SIMPLE_DEV_PM_OPS ( ice_pm_ops , ice_suspend , ice_resume ) ;
static const struct pci_error_handlers ice_pci_err_handler = {
. error_detected = ice_pci_err_detected ,
. slot_reset = ice_pci_err_slot_reset ,
. reset_prepare = ice_pci_err_reset_prepare ,
. reset_done = ice_pci_err_reset_done ,
. resume = ice_pci_err_resume
} ;
static struct pci_driver ice_driver = {
. name = KBUILD_MODNAME ,
. id_table = ice_pci_tbl ,
. probe = ice_probe ,
. remove = ice_remove ,
# ifdef CONFIG_PM
. driver . pm = & ice_pm_ops ,
# endif /* CONFIG_PM */
. shutdown = ice_shutdown ,
. sriov_configure = ice_sriov_configure ,
. err_handler = & ice_pci_err_handler
} ;
/**
* ice_module_init - Driver registration routine
*
* ice_module_init is the first routine called when the driver is
* loaded . All it does is register with the PCI subsystem .
*/
static int __init ice_module_init ( void )
{
int status ;
pr_info ( " %s \n " , ice_driver_string ) ;
pr_info ( " %s \n " , ice_copyright ) ;
ice_wq = alloc_workqueue ( " %s " , WQ_MEM_RECLAIM , 0 , KBUILD_MODNAME ) ;
if ( ! ice_wq ) {
pr_err ( " Failed to create workqueue \n " ) ;
return - ENOMEM ;
}
status = pci_register_driver ( & ice_driver ) ;
if ( status ) {
pr_err ( " failed to register PCI driver, err %d \n " , status ) ;
destroy_workqueue ( ice_wq ) ;
}
return status ;
}
module_init ( ice_module_init ) ;
/**
* ice_module_exit - Driver exit cleanup routine
*
* ice_module_exit is called just before the driver is removed
* from memory .
*/
static void __exit ice_module_exit ( void )
{
pci_unregister_driver ( & ice_driver ) ;
destroy_workqueue ( ice_wq ) ;
pr_info ( " module unloaded \n " ) ;
}
module_exit ( ice_module_exit ) ;
/**
* ice_set_mac_address - NDO callback to set MAC address
* @ netdev : network interface device structure
* @ pi : pointer to an address structure
*
* Returns 0 on success , negative on failure
*/
static int ice_set_mac_address ( struct net_device * netdev , void * pi )
{
struct ice_netdev_priv * np = netdev_priv ( netdev ) ;
struct ice_vsi * vsi = np - > vsi ;
struct ice_pf * pf = vsi - > back ;
struct ice_hw * hw = & pf - > hw ;
struct sockaddr * addr = pi ;
enum ice_status status ;
u8 flags = 0 ;
int err = 0 ;
u8 * mac ;
mac = ( u8 * ) addr - > sa_data ;
if ( ! is_valid_ether_addr ( mac ) )
return - EADDRNOTAVAIL ;
if ( ether_addr_equal ( netdev - > dev_addr , mac ) ) {
netdev_warn ( netdev , " already using mac %pM \n " , mac ) ;
return 0 ;
}
if ( test_bit ( ICE_DOWN , pf - > state ) | |
ice_is_reset_in_progress ( pf - > state ) ) {
netdev_err ( netdev , " can't set mac %pM. device not ready \n " ,
mac ) ;
return - EBUSY ;
}
/* Clean up old MAC filter. Not an error if old filter doesn't exist */
status = ice_fltr_remove_mac ( vsi , netdev - > dev_addr , ICE_FWD_TO_VSI ) ;
if ( status & & status ! = ICE_ERR_DOES_NOT_EXIST ) {
err = - EADDRNOTAVAIL ;
goto err_update_filters ;
}
/* Add filter for new MAC. If filter exists, return success */
status = ice_fltr_add_mac ( vsi , mac , ICE_FWD_TO_VSI ) ;
if ( status = = ICE_ERR_ALREADY_EXISTS ) {
/* Although this MAC filter is already present in hardware it's
* possible in some cases ( e . g . bonding ) that dev_addr was
* modified outside of the driver and needs to be restored back
* to this value .
*/
memcpy ( netdev - > dev_addr , mac , netdev - > addr_len ) ;
netdev_dbg ( netdev , " filter for MAC %pM already exists \n " , mac ) ;
return 0 ;
}
/* error if the new filter addition failed */
if ( status )
err = - EADDRNOTAVAIL ;
err_update_filters :
if ( err ) {
netdev_err ( netdev , " can't set MAC %pM. filter update failed \n " ,
mac ) ;
return err ;
}
/* change the netdev's MAC address */
memcpy ( netdev - > dev_addr , mac , netdev - > addr_len ) ;
netdev_dbg ( vsi - > netdev , " updated MAC address to %pM \n " ,
netdev - > dev_addr ) ;
/* write new MAC address to the firmware */
flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL ;
status = ice_aq_manage_mac_write ( hw , mac , flags , NULL ) ;
if ( status ) {
netdev_err ( netdev , " can't set MAC %pM. write to firmware failed error %s \n " ,
mac , ice_stat_str ( status ) ) ;
}
return 0 ;
}
/**
* ice_set_rx_mode - NDO callback to set the netdev filters
* @ netdev : network interface device structure
*/
static void ice_set_rx_mode ( struct net_device * netdev )
{
struct ice_netdev_priv * np = netdev_priv ( netdev ) ;
struct ice_vsi * vsi = np - > vsi ;
if ( ! vsi )
return ;
/* Set the flags to synchronize filters
* ndo_set_rx_mode may be triggered even without a change in netdev
* flags
*/
set_bit ( ICE_VSI_UMAC_FLTR_CHANGED , vsi - > state ) ;
set_bit ( ICE_VSI_MMAC_FLTR_CHANGED , vsi - > state ) ;
set_bit ( ICE_FLAG_FLTR_SYNC , vsi - > back - > flags ) ;
/* schedule our worker thread which will take care of
* applying the new filter changes
*/
ice_service_task_schedule ( vsi - > back ) ;
}
/**
* ice_set_tx_maxrate - NDO callback to set the maximum per - queue bitrate
* @ netdev : network interface device structure
* @ queue_index : Queue ID
* @ maxrate : maximum bandwidth in Mbps
*/
static int
ice_set_tx_maxrate ( struct net_device * netdev , int queue_index , u32 maxrate )
{
struct ice_netdev_priv * np = netdev_priv ( netdev ) ;
struct ice_vsi * vsi = np - > vsi ;
enum ice_status status ;
u16 q_handle ;
u8 tc ;
/* Validate maxrate requested is within permitted range */
if ( maxrate & & ( maxrate > ( ICE_SCHED_MAX_BW / 1000 ) ) ) {
netdev_err ( netdev , " Invalid max rate %d specified for the queue %d \n " ,
maxrate , queue_index ) ;
return - EINVAL ;
}
q_handle = vsi - > tx_rings [ queue_index ] - > q_handle ;
tc = ice_dcb_get_tc ( vsi , queue_index ) ;
/* Set BW back to default, when user set maxrate to 0 */
if ( ! maxrate )
status = ice_cfg_q_bw_dflt_lmt ( vsi - > port_info , vsi - > idx , tc ,
q_handle , ICE_MAX_BW ) ;
else
status = ice_cfg_q_bw_lmt ( vsi - > port_info , vsi - > idx , tc ,
q_handle , ICE_MAX_BW , maxrate * 1000 ) ;
if ( status ) {
netdev_err ( netdev , " Unable to set Tx max rate, error %s \n " ,
ice_stat_str ( status ) ) ;
return - EIO ;
}
return 0 ;
}
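/* Usage note (assumption about the generic entry point, not from this file):
 * the networking core calls this NDO when a Mbps value is written to
 * /sys/class/net/<ifname>/queues/tx-<n>/tx_maxrate; writing 0 restores the
 * default (unlimited) queue bandwidth via ice_cfg_q_bw_dflt_lmt() above.
 */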
/**
* ice_fdb_add - add an entry to the hardware database
* @ ndm : the input from the stack
* @ tb : pointer to array of nladdr ( unused )
* @ dev : the net device pointer
* @ addr : the MAC address entry being added
* @ vid : VLAN ID
* @ flags : instructions from stack about fdb operation
* @ extack : netlink extended ack
*/
static int
ice_fdb_add ( struct ndmsg * ndm , struct nlattr __always_unused * tb [ ] ,
struct net_device * dev , const unsigned char * addr , u16 vid ,
u16 flags , struct netlink_ext_ack __always_unused * extack )
{
int err ;
if ( vid ) {
netdev_err ( dev , " VLANs aren't supported yet for dev_uc|mc_add() \n " ) ;
return - EINVAL ;
}
if ( ndm - > ndm_state & & ! ( ndm - > ndm_state & NUD_PERMANENT ) ) {
netdev_err ( dev , " FDB only supports static addresses \n " ) ;
return - EINVAL ;
}
if ( is_unicast_ether_addr ( addr ) | | is_link_local_ether_addr ( addr ) )
err = dev_uc_add_excl ( dev , addr ) ;
else if ( is_multicast_ether_addr ( addr ) )
err = dev_mc_add_excl ( dev , addr ) ;
else
err = - EINVAL ;
/* Only return duplicate errors if NLM_F_EXCL is set */
if ( err = = - EEXIST & & ! ( flags & NLM_F_EXCL ) )
err = 0 ;
return err ;
}
/**
* ice_fdb_del - delete an entry from the hardware database
* @ ndm : the input from the stack
* @ tb : pointer to array of nladdr ( unused )
* @ dev : the net device pointer
* @ addr : the MAC address entry being added
* @ vid : VLAN ID
*/
static int
ice_fdb_del ( struct ndmsg * ndm , __always_unused struct nlattr * tb [ ] ,
struct net_device * dev , const unsigned char * addr ,
__always_unused u16 vid )
{
int err ;
if ( ndm - > ndm_state & NUD_PERMANENT ) {
netdev_err ( dev , " FDB only supports static addresses \n " ) ;
return - EINVAL ;
}
if ( is_unicast_ether_addr ( addr ) )
err = dev_uc_del ( dev , addr ) ;
else if ( is_multicast_ether_addr ( addr ) )
err = dev_mc_del ( dev , addr ) ;
else
err = - EINVAL ;
return err ;
}
/**
* ice_set_features - set the netdev feature flags
* @ netdev : ptr to the netdev being adjusted
* @ features : the feature set that the stack is suggesting
*/
static int
ice_set_features ( struct net_device * netdev , netdev_features_t features )
{
struct ice_netdev_priv * np = netdev_priv ( netdev ) ;
struct ice_vsi * vsi = np - > vsi ;
struct ice_pf * pf = vsi - > back ;
int ret = 0 ;
/* Don't set any netdev advanced features with device in Safe Mode */
if ( ice_is_safe_mode ( vsi - > back ) ) {
dev_err ( ice_pf_to_dev ( vsi - > back ) , " Device is in Safe Mode - not enabling advanced netdev features \n " ) ;
return ret ;
}
/* Do not change setting during reset */
if ( ice_is_reset_in_progress ( pf - > state ) ) {
dev_err ( ice_pf_to_dev ( vsi - > back ) , " Device is resetting, changing advanced netdev features temporarily unavailable. \n " ) ;
return - EBUSY ;
}
/* Multiple features can be changed in one call so keep features in
* separate if / else statements to guarantee each feature is checked
*/
if ( features & NETIF_F_RXHASH & & ! ( netdev - > features & NETIF_F_RXHASH ) )
ice_vsi_manage_rss_lut ( vsi , true ) ;
else if ( ! ( features & NETIF_F_RXHASH ) & &
netdev - > features & NETIF_F_RXHASH )
ice_vsi_manage_rss_lut ( vsi , false ) ;
if ( ( features & NETIF_F_HW_VLAN_CTAG_RX ) & &
! ( netdev - > features & NETIF_F_HW_VLAN_CTAG_RX ) )
ret = ice_vsi_manage_vlan_stripping ( vsi , true ) ;
else if ( ! ( features & NETIF_F_HW_VLAN_CTAG_RX ) & &
( netdev - > features & NETIF_F_HW_VLAN_CTAG_RX ) )
ret = ice_vsi_manage_vlan_stripping ( vsi , false ) ;
if ( ( features & NETIF_F_HW_VLAN_CTAG_TX ) & &
! ( netdev - > features & NETIF_F_HW_VLAN_CTAG_TX ) )
ret = ice_vsi_manage_vlan_insertion ( vsi ) ;
else if ( ! ( features & NETIF_F_HW_VLAN_CTAG_TX ) & &
( netdev - > features & NETIF_F_HW_VLAN_CTAG_TX ) )
ret = ice_vsi_manage_vlan_insertion ( vsi ) ;
if ( ( features & NETIF_F_HW_VLAN_CTAG_FILTER ) & &
! ( netdev - > features & NETIF_F_HW_VLAN_CTAG_FILTER ) )
ret = ice_cfg_vlan_pruning ( vsi , true , false ) ;
else if ( ! ( features & NETIF_F_HW_VLAN_CTAG_FILTER ) & &
( netdev - > features & NETIF_F_HW_VLAN_CTAG_FILTER ) )
ret = ice_cfg_vlan_pruning ( vsi , false , false ) ;
if ( ( features & NETIF_F_NTUPLE ) & &
! ( netdev - > features & NETIF_F_NTUPLE ) ) {
ice_vsi_manage_fdir ( vsi , true ) ;
ice_init_arfs ( vsi ) ;
} else if ( ! ( features & NETIF_F_NTUPLE ) & &
( netdev - > features & NETIF_F_NTUPLE ) ) {
ice_vsi_manage_fdir ( vsi , false ) ;
ice_clear_arfs ( vsi ) ;
}
return ret ;
}
/**
* ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
* @ vsi : VSI to setup VLAN properties for
*/
static int ice_vsi_vlan_setup ( struct ice_vsi * vsi )
{
int ret = 0 ;
if ( vsi - > netdev - > features & NETIF_F_HW_VLAN_CTAG_RX )
ret = ice_vsi_manage_vlan_stripping ( vsi , true ) ;
if ( vsi - > netdev - > features & NETIF_F_HW_VLAN_CTAG_TX )
ret = ice_vsi_manage_vlan_insertion ( vsi ) ;
return ret ;
}
/**
* ice_vsi_cfg - Setup the VSI
* @ vsi : the VSI being configured
*
* Return 0 on success and negative value on error
*/
int ice_vsi_cfg ( struct ice_vsi * vsi )
{
int err ;
if ( vsi - > netdev ) {
ice_set_rx_mode ( vsi - > netdev ) ;
err = ice_vsi_vlan_setup ( vsi ) ;
if ( err )
return err ;
}
ice_vsi_cfg_dcb_rings ( vsi ) ;
err = ice_vsi_cfg_lan_txqs ( vsi ) ;
if ( ! err & & ice_is_xdp_ena_vsi ( vsi ) )
err = ice_vsi_cfg_xdp_txqs ( vsi ) ;
if ( ! err )
err = ice_vsi_cfg_rxqs ( vsi ) ;
return err ;
}
/* THEORY OF MODERATION:
* The below code creates custom DIM profiles for use by this driver , because
* the ice driver hardware works differently than the hardware that DIMLIB was
* originally made for . ice hardware doesn ' t have packet count limits that
* can trigger an interrupt , but it * does * have interrupt rate limit support ,
* and this code adds that capability to be used by the driver when it ' s using
* DIMLIB . The DIMLIB code was always designed to be a suggestion to the driver
* for how to " respond " to traffic and interrupts , so this driver uses a
* slightly different set of moderation parameters to get best performance .
*/
struct ice_dim {
/* the throttle rate for interrupts, basically worst case delay before
* an initial interrupt fires , value is stored in microseconds .
*/
u16 itr ;
/* the rate limit for interrupts, which can cap a delay from a small
* ITR at a certain number of interrupts per second. For example, a 2 us ITR
* could yield as much as 500,000 interrupts per second, but with a
* 10 us rate limit it is capped at 100,000 interrupts per second. Value
* is stored in microseconds .
*/
u16 intrl ;
} ;
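/* Worked example of the comment above (illustrative helper, not used by the
 * driver): the effective minimum spacing between interrupts is the larger of
 * ITR and INTRL, so a 2 us ITR alone allows up to 500,000 interrupts per
 * second, while adding a 10 us rate limit caps it at 100,000.
 */
static u32 __maybe_unused ice_example_max_ints_per_sec(u16 itr, u16 intrl)
{
	u16 gap = max(itr, intrl);	/* microseconds between interrupts */

	return gap ? 1000000 / gap : 0;
}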
/* Make a different profile for Rx that doesn't allow quite so aggressive
* moderation at the high end (it maxes out at 128 us, or about 8k interrupts
* per second). The INTRL/rate parameters here are only useful to cap small
* ITR values, which is why for larger ITRs - like 128, which can only
* generate 8k interrupts per second - there is no point to rate limit and
* the values are set to zero. The rate limit values do affect latency, and
* so must be reasonably small so as not to impact latency-sensitive tests.
*/
static const struct ice_dim rx_profile [ ] = {
{ 2 , 10 } ,
{ 8 , 16 } ,
{ 32 , 0 } ,
{ 96 , 0 } ,
{ 128 , 0 }
} ;
/* The transmit profile, which has the same sorts of values
* as the previous struct
*/
static const struct ice_dim tx_profile [ ] = {
{ 2 , 10 } ,
{ 8 , 16 } ,
{ 64 , 0 } ,
{ 128 , 0 } ,
{ 256 , 0 }
} ;
static void ice_tx_dim_work ( struct work_struct * work )
{
struct ice_ring_container * rc ;
struct ice_q_vector * q_vector ;
struct dim * dim ;
u16 itr , intrl ;
dim = container_of ( work , struct dim , work ) ;
rc = container_of ( dim , struct ice_ring_container , dim ) ;
q_vector = container_of ( rc , struct ice_q_vector , tx ) ;
if ( dim - > profile_ix > = ARRAY_SIZE ( tx_profile ) )
dim - > profile_ix = ARRAY_SIZE ( tx_profile ) - 1 ;
/* look up the values in our local table */
itr = tx_profile [ dim - > profile_ix ] . itr ;
intrl = tx_profile [ dim - > profile_ix ] . intrl ;
ice_trace ( tx_dim_work , q_vector , dim ) ;
ice_write_itr ( rc , itr ) ;
ice_write_intrl ( q_vector , intrl ) ;
dim - > state = DIM_START_MEASURE ;
}
static void ice_rx_dim_work ( struct work_struct * work )
{
struct ice_ring_container * rc ;
struct ice_q_vector * q_vector ;
struct dim * dim ;
u16 itr , intrl ;
dim = container_of ( work , struct dim , work ) ;
rc = container_of ( dim , struct ice_ring_container , dim ) ;
q_vector = container_of ( rc , struct ice_q_vector , rx ) ;
if ( dim - > profile_ix > = ARRAY_SIZE ( rx_profile ) )
dim - > profile_ix = ARRAY_SIZE ( rx_profile ) - 1 ;
/* look up the values in our local table */
itr = rx_profile [ dim - > profile_ix ] . itr ;
intrl = rx_profile [ dim - > profile_ix ] . intrl ;
ice_trace ( rx_dim_work , q_vector , dim ) ;
ice_write_itr ( rc , itr ) ;
ice_write_intrl ( q_vector , intrl ) ;
dim - > state = DIM_START_MEASURE ;
}
/**
* ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
* @ vsi : the VSI being configured
*/
static void ice_napi_enable_all ( struct ice_vsi * vsi )
{
int q_idx ;
if ( ! vsi - > netdev )
return ;
ice_for_each_q_vector ( vsi , q_idx ) {
struct ice_q_vector * q_vector = vsi - > q_vectors [ q_idx ] ;
INIT_WORK ( & q_vector - > tx . dim . work , ice_tx_dim_work ) ;
q_vector - > tx . dim . mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE ;
INIT_WORK ( & q_vector - > rx . dim . work , ice_rx_dim_work ) ;
q_vector - > rx . dim . mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE ;
if ( q_vector - > rx . ring | | q_vector - > tx . ring )
napi_enable ( & q_vector - > napi ) ;
}
}
/**
* ice_up_complete - Finish the last steps of bringing up a connection
* @ vsi : The VSI being configured
*
* Return 0 on success and negative value on error
*/
static int ice_up_complete ( struct ice_vsi * vsi )
{
struct ice_pf * pf = vsi - > back ;
int err ;
ice_vsi_cfg_msix ( vsi ) ;
/* Enable only Rx rings, Tx rings were enabled by the FW when the
* Tx queue group list was configured and the context bits were
* programmed using ice_vsi_cfg_txqs
*/
err = ice_vsi_start_all_rx_rings ( vsi ) ;
if ( err )
return err ;
clear_bit ( ICE_VSI_DOWN , vsi - > state ) ;
ice_napi_enable_all ( vsi ) ;
ice_vsi_ena_irq ( vsi ) ;
if ( vsi - > port_info & &
( vsi - > port_info - > phy . link_info . link_info & ICE_AQ_LINK_UP ) & &
vsi - > netdev ) {
ice_print_link_msg ( vsi , true ) ;
netif_tx_start_all_queues ( vsi - > netdev ) ;
netif_carrier_on ( vsi - > netdev ) ;
}
ice_service_task_schedule ( pf ) ;
return 0 ;
}
/**
* ice_up - Bring the connection back up after being down
* @ vsi : VSI being configured
*/
int ice_up ( struct ice_vsi * vsi )
{
int err ;
err = ice_vsi_cfg ( vsi ) ;
if ( ! err )
err = ice_up_complete ( vsi ) ;
return err ;
}
/**
* ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
* @ ring : Tx or Rx ring to read stats from
* @ pkts : packets stats counter
* @ bytes : bytes stats counter
*
* This function fetches stats from the ring using the atomic operations
* that need to be performed to read u64 values on a 32-bit machine.
*/
static void
ice_fetch_u64_stats_per_ring ( struct ice_ring * ring , u64 * pkts , u64 * bytes )
{
unsigned int start ;
* pkts = 0 ;
* bytes = 0 ;
if ( ! ring )
return ;
do {
start = u64_stats_fetch_begin_irq ( & ring - > syncp ) ;
* pkts = ring - > stats . pkts ;
* bytes = ring - > stats . bytes ;
} while ( u64_stats_fetch_retry_irq ( & ring - > syncp , start ) ) ;
}
/**
* ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
* @ vsi : the VSI to be updated
* @ rings : rings to work on
* @ count : number of rings
*/
static void
ice_update_vsi_tx_ring_stats ( struct ice_vsi * vsi , struct ice_ring * * rings ,
u16 count )
{
struct rtnl_link_stats64 * vsi_stats = & vsi - > net_stats ;
u16 i ;
for ( i = 0 ; i < count ; i + + ) {
struct ice_ring * ring ;
u64 pkts , bytes ;
ring = READ_ONCE ( rings [ i ] ) ;
ice_fetch_u64_stats_per_ring ( ring , & pkts , & bytes ) ;
vsi_stats - > tx_packets + = pkts ;
vsi_stats - > tx_bytes + = bytes ;
vsi - > tx_restart + = ring - > tx_stats . restart_q ;
vsi - > tx_busy + = ring - > tx_stats . tx_busy ;
vsi - > tx_linearize + = ring - > tx_stats . tx_linearize ;
}
}
/**
* ice_update_vsi_ring_stats - Update VSI stats counters
* @ vsi : the VSI to be updated
*/
static void ice_update_vsi_ring_stats ( struct ice_vsi * vsi )
{
struct rtnl_link_stats64 * vsi_stats = & vsi - > net_stats ;
u64 pkts , bytes ;
int i ;
/* reset netdev stats */
vsi_stats - > tx_packets = 0 ;
vsi_stats - > tx_bytes = 0 ;
vsi_stats - > rx_packets = 0 ;
vsi_stats - > rx_bytes = 0 ;
/* reset non-netdev (extended) stats */
vsi - > tx_restart = 0 ;
vsi - > tx_busy = 0 ;
vsi - > tx_linearize = 0 ;
vsi - > rx_buf_failed = 0 ;
vsi - > rx_page_failed = 0 ;
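/* The ring pointers are loaded once with READ_ONCE() inside the RCU read
* section below, guarding the walk against rings being replaced by a
* concurrent queue reconfiguration.
*/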
rcu_read_lock ( ) ;
/* update Tx rings counters */
ice_update_vsi_tx_ring_stats ( vsi , vsi - > tx_rings , vsi - > num_txq ) ;
/* update Rx rings counters */
ice_for_each_rxq ( vsi , i ) {
struct ice_ring * ring = READ_ONCE ( vsi - > rx_rings [ i ] ) ;
ice_fetch_u64_stats_per_ring ( ring , & pkts , & bytes ) ;
vsi_stats - > rx_packets + = pkts ;
vsi_stats - > rx_bytes + = bytes ;
vsi - > rx_buf_failed + = ring - > rx_stats . alloc_buf_failed ;
vsi - > rx_page_failed + = ring - > rx_stats . alloc_page_failed ;
}
/* update XDP Tx rings counters */
if ( ice_is_xdp_ena_vsi ( vsi ) )
ice_update_vsi_tx_ring_stats ( vsi , vsi - > xdp_rings ,
vsi - > num_xdp_txq ) ;
rcu_read_unlock ( ) ;
}
/**
* ice_update_vsi_stats - Update VSI stats counters
* @ vsi : the VSI to be updated
*/
void ice_update_vsi_stats ( struct ice_vsi * vsi )
{
struct rtnl_link_stats64 * cur_ns = & vsi - > net_stats ;
struct ice_eth_stats * cur_es = & vsi - > eth_stats ;
struct ice_pf * pf = vsi - > back ;
if ( test_bit ( ICE_VSI_DOWN , vsi - > state ) | |
test_bit ( ICE_CFG_BUSY , pf - > state ) )
return ;
/* get stats as recorded by Tx/Rx rings */
ice_update_vsi_ring_stats ( vsi ) ;
/* get VSI stats as recorded by the hardware */
ice_update_eth_stats ( vsi ) ;
cur_ns - > tx_errors = cur_es - > tx_errors ;
cur_ns - > rx_dropped = cur_es - > rx_discards ;
cur_ns - > tx_dropped = cur_es - > tx_discards ;
cur_ns - > multicast = cur_es - > rx_multicast ;
/* update some more netdev stats if this is main VSI */
if ( vsi - > type = = ICE_VSI_PF ) {
cur_ns - > rx_crc_errors = pf - > stats . crc_errors ;
cur_ns - > rx_errors = pf - > stats . crc_errors +
pf - > stats . illegal_bytes +
pf - > stats . rx_len_errors +
pf - > stats . rx_undersize +
pf - > hw_csum_rx_error +
pf - > stats . rx_jabber +
pf - > stats . rx_fragments +
pf - > stats . rx_oversize ;
cur_ns - > rx_length_errors = pf - > stats . rx_len_errors ;
/* record drops from the port level */
cur_ns - > rx_missed_errors = pf - > stats . eth . rx_discards ;
}
}
/**
* ice_update_pf_stats - Update PF port stats counters
* @ pf : PF whose stats needs to be updated
*/
void ice_update_pf_stats ( struct ice_pf * pf )
{
struct ice_hw_port_stats * prev_ps , * cur_ps ;
struct ice_hw * hw = & pf - > hw ;
u16 fd_ctr_base ;
u8 port ;
port = hw - > port_info - > lport ;
prev_ps = & pf - > stats_prev ;
cur_ps = & pf - > stats ;
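/* Each helper below reads a free-running 40-bit or 32-bit hardware counter
* and folds the delta since the previous snapshot (prev_ps) into the
* accumulated copy (cur_ps), so counter roll-over and counters that are
* non-zero at driver load are handled transparently.
*/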
ice_stat_update40 ( hw , GLPRT_GORCL ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > eth . rx_bytes ,
& cur_ps - > eth . rx_bytes ) ;
ice_stat_update40 ( hw , GLPRT_UPRCL ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > eth . rx_unicast ,
& cur_ps - > eth . rx_unicast ) ;
ice_stat_update40 ( hw , GLPRT_MPRCL ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > eth . rx_multicast ,
& cur_ps - > eth . rx_multicast ) ;
ice_stat_update40 ( hw , GLPRT_BPRCL ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > eth . rx_broadcast ,
& cur_ps - > eth . rx_broadcast ) ;
ice_stat_update32 ( hw , PRTRPB_RDPC , pf - > stat_prev_loaded ,
& prev_ps - > eth . rx_discards ,
& cur_ps - > eth . rx_discards ) ;
ice_stat_update40 ( hw , GLPRT_GOTCL ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > eth . tx_bytes ,
& cur_ps - > eth . tx_bytes ) ;
ice_stat_update40 ( hw , GLPRT_UPTCL ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > eth . tx_unicast ,
& cur_ps - > eth . tx_unicast ) ;
ice_stat_update40 ( hw , GLPRT_MPTCL ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > eth . tx_multicast ,
& cur_ps - > eth . tx_multicast ) ;
ice_stat_update40 ( hw , GLPRT_BPTCL ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > eth . tx_broadcast ,
& cur_ps - > eth . tx_broadcast ) ;
ice_stat_update32 ( hw , GLPRT_TDOLD ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > tx_dropped_link_down ,
& cur_ps - > tx_dropped_link_down ) ;
ice_stat_update40 ( hw , GLPRT_PRC64L ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > rx_size_64 , & cur_ps - > rx_size_64 ) ;
ice_stat_update40 ( hw , GLPRT_PRC127L ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > rx_size_127 , & cur_ps - > rx_size_127 ) ;
ice_stat_update40 ( hw , GLPRT_PRC255L ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > rx_size_255 , & cur_ps - > rx_size_255 ) ;
ice_stat_update40 ( hw , GLPRT_PRC511L ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > rx_size_511 , & cur_ps - > rx_size_511 ) ;
ice_stat_update40 ( hw , GLPRT_PRC1023L ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > rx_size_1023 , & cur_ps - > rx_size_1023 ) ;
ice_stat_update40 ( hw , GLPRT_PRC1522L ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > rx_size_1522 , & cur_ps - > rx_size_1522 ) ;
ice_stat_update40 ( hw , GLPRT_PRC9522L ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > rx_size_big , & cur_ps - > rx_size_big ) ;
ice_stat_update40 ( hw , GLPRT_PTC64L ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > tx_size_64 , & cur_ps - > tx_size_64 ) ;
ice_stat_update40 ( hw , GLPRT_PTC127L ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > tx_size_127 , & cur_ps - > tx_size_127 ) ;
ice_stat_update40 ( hw , GLPRT_PTC255L ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > tx_size_255 , & cur_ps - > tx_size_255 ) ;
ice_stat_update40 ( hw , GLPRT_PTC511L ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > tx_size_511 , & cur_ps - > tx_size_511 ) ;
ice_stat_update40 ( hw , GLPRT_PTC1023L ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > tx_size_1023 , & cur_ps - > tx_size_1023 ) ;
ice_stat_update40 ( hw , GLPRT_PTC1522L ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > tx_size_1522 , & cur_ps - > tx_size_1522 ) ;
ice_stat_update40 ( hw , GLPRT_PTC9522L ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > tx_size_big , & cur_ps - > tx_size_big ) ;
fd_ctr_base = hw - > fd_ctr_base ;
ice_stat_update40 ( hw ,
GLSTAT_FD_CNT0L ( ICE_FD_SB_STAT_IDX ( fd_ctr_base ) ) ,
pf - > stat_prev_loaded , & prev_ps - > fd_sb_match ,
& cur_ps - > fd_sb_match ) ;
ice_stat_update32 ( hw , GLPRT_LXONRXC ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > link_xon_rx , & cur_ps - > link_xon_rx ) ;
ice_stat_update32 ( hw , GLPRT_LXOFFRXC ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > link_xoff_rx , & cur_ps - > link_xoff_rx ) ;
ice_stat_update32 ( hw , GLPRT_LXONTXC ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > link_xon_tx , & cur_ps - > link_xon_tx ) ;
ice_stat_update32 ( hw , GLPRT_LXOFFTXC ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > link_xoff_tx , & cur_ps - > link_xoff_tx ) ;
ice_update_dcb_stats ( pf ) ;
ice_stat_update32 ( hw , GLPRT_CRCERRS ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > crc_errors , & cur_ps - > crc_errors ) ;
ice_stat_update32 ( hw , GLPRT_ILLERRC ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > illegal_bytes , & cur_ps - > illegal_bytes ) ;
ice_stat_update32 ( hw , GLPRT_MLFC ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > mac_local_faults ,
& cur_ps - > mac_local_faults ) ;
ice_stat_update32 ( hw , GLPRT_MRFC ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > mac_remote_faults ,
& cur_ps - > mac_remote_faults ) ;
ice_stat_update32 ( hw , GLPRT_RLEC ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > rx_len_errors , & cur_ps - > rx_len_errors ) ;
ice_stat_update32 ( hw , GLPRT_RUC ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > rx_undersize , & cur_ps - > rx_undersize ) ;
ice_stat_update32 ( hw , GLPRT_RFC ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > rx_fragments , & cur_ps - > rx_fragments ) ;
ice_stat_update32 ( hw , GLPRT_ROC ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > rx_oversize , & cur_ps - > rx_oversize ) ;
ice_stat_update32 ( hw , GLPRT_RJC ( port ) , pf - > stat_prev_loaded ,
& prev_ps - > rx_jabber , & cur_ps - > rx_jabber ) ;
cur_ps - > fd_sb_status = test_bit ( ICE_FLAG_FD_ENA , pf - > flags ) ? 1 : 0 ;
pf - > stat_prev_loaded = true ;
}
/**
* ice_get_stats64 - get statistics for network device structure
* @ netdev : network interface device structure
* @ stats : main device statistics structure
*/
static
void ice_get_stats64 ( struct net_device * netdev , struct rtnl_link_stats64 * stats )
{
struct ice_netdev_priv * np = netdev_priv ( netdev ) ;
struct rtnl_link_stats64 * vsi_stats ;
struct ice_vsi * vsi = np - > vsi ;
vsi_stats = & vsi - > net_stats ;
if ( ! vsi - > num_txq | | ! vsi - > num_rxq )
return ;
/* netdev packet/byte stats come from ring counter. These are obtained
* by summing up ring counters ( done by ice_update_vsi_ring_stats ) .
* But , only call the update routine and read the registers if VSI is
* not down .
*/
if ( ! test_bit ( ICE_VSI_DOWN , vsi - > state ) )
ice_update_vsi_ring_stats ( vsi ) ;
stats - > tx_packets = vsi_stats - > tx_packets ;
stats - > tx_bytes = vsi_stats - > tx_bytes ;
stats - > rx_packets = vsi_stats - > rx_packets ;
stats - > rx_bytes = vsi_stats - > rx_bytes ;
/* The rest of the stats can be read from the hardware but instead we
* just return values that the watchdog task has already obtained from
* the hardware .
*/
stats - > multicast = vsi_stats - > multicast ;
stats - > tx_errors = vsi_stats - > tx_errors ;
stats - > tx_dropped = vsi_stats - > tx_dropped ;
stats - > rx_errors = vsi_stats - > rx_errors ;
stats - > rx_dropped = vsi_stats - > rx_dropped ;
stats - > rx_crc_errors = vsi_stats - > rx_crc_errors ;
stats - > rx_length_errors = vsi_stats - > rx_length_errors ;
}
/**
* ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
* @ vsi : VSI having NAPI disabled
*/
static void ice_napi_disable_all ( struct ice_vsi * vsi )
{
int q_idx ;
if ( ! vsi - > netdev )
return ;
ice_for_each_q_vector ( vsi , q_idx ) {
struct ice_q_vector * q_vector = vsi - > q_vectors [ q_idx ] ;
if ( q_vector - > rx . ring | | q_vector - > tx . ring )
napi_disable ( & q_vector - > napi ) ;
cancel_work_sync ( & q_vector - > tx . dim . work ) ;
cancel_work_sync ( & q_vector - > rx . dim . work ) ;
}
}
/**
* ice_down - Shutdown the connection
* @ vsi : The VSI being stopped
*/
int ice_down ( struct ice_vsi * vsi )
{
int i , tx_err , rx_err , link_err = 0 ;
/* Caller of this function is expected to set the
* vsi - > state ICE_VSI_DOWN bit
*/
if ( vsi - > netdev ) {
netif_carrier_off ( vsi - > netdev ) ;
netif_tx_disable ( vsi - > netdev ) ;
}
ice_vsi_dis_irq ( vsi ) ;
tx_err = ice_vsi_stop_lan_tx_rings ( vsi , ICE_NO_RESET , 0 ) ;
if ( tx_err )
netdev_err ( vsi - > netdev , " Failed stop Tx rings, VSI %d error %d \n " ,
vsi - > vsi_num , tx_err ) ;
if ( ! tx_err & & ice_is_xdp_ena_vsi ( vsi ) ) {
tx_err = ice_vsi_stop_xdp_tx_rings ( vsi ) ;
if ( tx_err )
netdev_err ( vsi - > netdev , " Failed stop XDP rings, VSI %d error %d \n " ,
vsi - > vsi_num , tx_err ) ;
}
rx_err = ice_vsi_stop_all_rx_rings ( vsi ) ;
if ( rx_err )
netdev_err ( vsi - > netdev , " Failed stop Rx rings, VSI %d error %d \n " ,
vsi - > vsi_num , rx_err ) ;
ice_napi_disable_all ( vsi ) ;
if ( test_bit ( ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA , vsi - > back - > flags ) ) {
link_err = ice_force_phys_link_state ( vsi , false ) ;
if ( link_err )
netdev_err ( vsi - > netdev , " Failed to set physical link down, VSI %d error %d \n " ,
vsi - > vsi_num , link_err ) ;
}
ice_for_each_txq ( vsi , i )
ice_clean_tx_ring ( vsi - > tx_rings [ i ] ) ;
ice_for_each_rxq ( vsi , i )
ice_clean_rx_ring ( vsi - > rx_rings [ i ] ) ;
if ( tx_err | | rx_err | | link_err ) {
netdev_err ( vsi - > netdev , " Failed to close VSI 0x%04X on switch 0x%04X \n " ,
vsi - > vsi_num , vsi - > vsw - > sw_id ) ;
return - EIO ;
}
return 0 ;
}
/**
* ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
* @ vsi : VSI having resources allocated
*
* Return 0 on success , negative on failure
*/
int ice_vsi_setup_tx_rings ( struct ice_vsi * vsi )
{
int i , err = 0 ;
if ( ! vsi - > num_txq ) {
dev_err ( ice_pf_to_dev ( vsi - > back ) , " VSI %d has 0 Tx queues \n " ,
vsi - > vsi_num ) ;
return - EINVAL ;
}
ice_for_each_txq ( vsi , i ) {
struct ice_ring * ring = vsi - > tx_rings [ i ] ;
if ( ! ring )
return - EINVAL ;
ring - > netdev = vsi - > netdev ;
err = ice_setup_tx_ring ( ring ) ;
if ( err )
break ;
}
return err ;
}
/**
* ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
* @ vsi : VSI having resources allocated
*
* Return 0 on success , negative on failure
*/
int ice_vsi_setup_rx_rings ( struct ice_vsi * vsi )
{
int i , err = 0 ;
if ( ! vsi - > num_rxq ) {
dev_err ( ice_pf_to_dev ( vsi - > back ) , " VSI %d has 0 Rx queues \n " ,
vsi - > vsi_num ) ;
return - EINVAL ;
}
ice_for_each_rxq ( vsi , i ) {
struct ice_ring * ring = vsi - > rx_rings [ i ] ;
if ( ! ring )
return - EINVAL ;
ring - > netdev = vsi - > netdev ;
err = ice_setup_rx_ring ( ring ) ;
if ( err )
break ;
}
return err ;
}
/**
* ice_vsi_open_ctrl - open control VSI for use
* @ vsi : the VSI to open
*
* Initialization of the Control VSI
*
* Returns 0 on success , negative value on error
*/
int ice_vsi_open_ctrl ( struct ice_vsi * vsi )
{
char int_name [ ICE_INT_NAME_STR_LEN ] ;
struct ice_pf * pf = vsi - > back ;
struct device * dev ;
int err ;
dev = ice_pf_to_dev ( pf ) ;
/* allocate descriptors */
err = ice_vsi_setup_tx_rings ( vsi ) ;
if ( err )
goto err_setup_tx ;
err = ice_vsi_setup_rx_rings ( vsi ) ;
if ( err )
goto err_setup_rx ;
err = ice_vsi_cfg ( vsi ) ;
if ( err )
goto err_setup_rx ;
snprintf ( int_name , sizeof ( int_name ) - 1 , " %s-%s:ctrl " ,
dev_driver_string ( dev ) , dev_name ( dev ) ) ;
err = ice_vsi_req_irq_msix ( vsi , int_name ) ;
if ( err )
goto err_setup_rx ;
ice_vsi_cfg_msix ( vsi ) ;
err = ice_vsi_start_all_rx_rings ( vsi ) ;
if ( err )
goto err_up_complete ;
clear_bit ( ICE_VSI_DOWN , vsi - > state ) ;
ice_vsi_ena_irq ( vsi ) ;
return 0 ;
err_up_complete :
ice_down ( vsi ) ;
err_setup_rx :
ice_vsi_free_rx_rings ( vsi ) ;
err_setup_tx :
ice_vsi_free_tx_rings ( vsi ) ;
return err ;
}
/**
* ice_vsi_open - Called when a network interface is made active
* @ vsi : the VSI to open
*
* Initialization of the VSI
*
* Returns 0 on success , negative value on error
*/
static int ice_vsi_open ( struct ice_vsi * vsi )
{
char int_name [ ICE_INT_NAME_STR_LEN ] ;
struct ice_pf * pf = vsi - > back ;
int err ;
/* allocate descriptors */
err = ice_vsi_setup_tx_rings ( vsi ) ;
if ( err )
goto err_setup_tx ;
err = ice_vsi_setup_rx_rings ( vsi ) ;
if ( err )
goto err_setup_rx ;
err = ice_vsi_cfg ( vsi ) ;
if ( err )
goto err_setup_rx ;
snprintf ( int_name , sizeof ( int_name ) - 1 , " %s-%s " ,
dev_driver_string ( ice_pf_to_dev ( pf ) ) , vsi - > netdev - > name ) ;
err = ice_vsi_req_irq_msix ( vsi , int_name ) ;
if ( err )
goto err_setup_rx ;
/* Notify the stack of the actual queue counts. */
err = netif_set_real_num_tx_queues ( vsi - > netdev , vsi - > num_txq ) ;
if ( err )
goto err_set_qs ;
err = netif_set_real_num_rx_queues ( vsi - > netdev , vsi - > num_rxq ) ;
if ( err )
goto err_set_qs ;
err = ice_up_complete ( vsi ) ;
if ( err )
goto err_up_complete ;
return 0 ;
err_up_complete :
ice_down ( vsi ) ;
err_set_qs :
ice_vsi_free_irq ( vsi ) ;
err_setup_rx :
ice_vsi_free_rx_rings ( vsi ) ;
err_setup_tx :
ice_vsi_free_tx_rings ( vsi ) ;
return err ;
}
/**
* ice_vsi_release_all - Delete all VSIs
* @ pf : PF from which all VSIs are being removed
*/
static void ice_vsi_release_all ( struct ice_pf * pf )
{
int err , i ;
if ( ! pf - > vsi )
return ;
ice_for_each_vsi ( pf , i ) {
if ( ! pf - > vsi [ i ] )
continue ;
err = ice_vsi_release ( pf - > vsi [ i ] ) ;
if ( err )
dev_dbg ( ice_pf_to_dev ( pf ) , " Failed to release pf->vsi[%d], err %d, vsi_num = %d \n " ,
i , err , pf - > vsi [ i ] - > vsi_num ) ;
}
}
/**
* ice_vsi_rebuild_by_type - Rebuild VSI of a given type
* @ pf : pointer to the PF instance
* @ type : VSI type to rebuild
*
* Iterates through the pf - > vsi array and rebuilds VSIs of the requested type
*/
static int ice_vsi_rebuild_by_type ( struct ice_pf * pf , enum ice_vsi_type type )
{
struct device * dev = ice_pf_to_dev ( pf ) ;
enum ice_status status ;
int i , err ;
ice_for_each_vsi ( pf , i ) {
struct ice_vsi * vsi = pf - > vsi [ i ] ;
if ( ! vsi | | vsi - > type ! = type )
continue ;
/* rebuild the VSI */
err = ice_vsi_rebuild ( vsi , true ) ;
if ( err ) {
dev_err ( dev , " rebuild VSI failed, err %d, VSI index %d, type %s \n " ,
err , vsi - > idx , ice_vsi_type_str ( type ) ) ;
return err ;
}
/* replay filters for the VSI */
status = ice_replay_vsi ( & pf - > hw , vsi - > idx ) ;
if ( status ) {
dev_err ( dev , " replay VSI failed, status %s, VSI index %d, type %s \n " ,
ice_stat_str ( status ) , vsi - > idx ,
ice_vsi_type_str ( type ) ) ;
return - EIO ;
}
/* Re-map HW VSI number, using VSI handle that has been
* previously validated in ice_replay_vsi ( ) call above
*/
vsi - > vsi_num = ice_get_hw_vsi_num ( & pf - > hw , vsi - > idx ) ;
/* enable the VSI */
err = ice_ena_vsi ( vsi , false ) ;
if ( err ) {
dev_err ( dev , " enable VSI failed, err %d, VSI index %d, type %s \n " ,
err , vsi - > idx , ice_vsi_type_str ( type ) ) ;
return err ;
}
dev_info ( dev , " VSI rebuilt. VSI index %d, type %s \n " , vsi - > idx ,
ice_vsi_type_str ( type ) ) ;
}
return 0 ;
}
/**
* ice_update_pf_netdev_link - Update PF netdev link status
* @ pf : pointer to the PF instance
*/
static void ice_update_pf_netdev_link ( struct ice_pf * pf )
{
bool link_up ;
int i ;
ice_for_each_vsi ( pf , i ) {
struct ice_vsi * vsi = pf - > vsi [ i ] ;
if ( ! vsi | | vsi - > type ! = ICE_VSI_PF )
return ;
ice_get_link_status ( pf - > vsi [ i ] - > port_info , & link_up ) ;
if ( link_up ) {
netif_carrier_on ( pf - > vsi [ i ] - > netdev ) ;
netif_tx_wake_all_queues ( pf - > vsi [ i ] - > netdev ) ;
} else {
netif_carrier_off ( pf - > vsi [ i ] - > netdev ) ;
netif_tx_stop_all_queues ( pf - > vsi [ i ] - > netdev ) ;
}
}
}
/**
* ice_rebuild - rebuild after reset
* @ pf : PF to rebuild
* @ reset_type : type of reset
*
* Do not rebuild VF VSI in this flow because that is already handled via
* ice_reset_all_vfs ( ) . This is because requirements for resetting a VF after a
* PFR / CORER / GLOBER / etc . are different than the normal flow . Also , we don ' t want
* to reset / rebuild all the VF VSI twice .
*/
static void ice_rebuild ( struct ice_pf * pf , enum ice_reset_req reset_type )
{
struct device * dev = ice_pf_to_dev ( pf ) ;
struct ice_hw * hw = & pf - > hw ;
enum ice_status ret ;
int err ;
if ( test_bit ( ICE_DOWN , pf - > state ) )
goto clear_recovery ;
dev_dbg ( dev , " rebuilding PF after reset_type=%d \n " , reset_type ) ;
ret = ice_init_all_ctrlq ( hw ) ;
if ( ret ) {
dev_err ( dev , " control queues init failed %s \n " ,
ice_stat_str ( ret ) ) ;
goto err_init_ctrlq ;
}
/* if DDP was previously loaded successfully */
if ( ! ice_is_safe_mode ( pf ) ) {
/* reload the SW DB of filter tables */
if ( reset_type = = ICE_RESET_PFR )
ice_fill_blk_tbls ( hw ) ;
else
/* Reload DDP Package after CORER/GLOBR reset */
ice_load_pkg ( NULL , pf ) ;
}
ret = ice_clear_pf_cfg ( hw ) ;
if ( ret ) {
dev_err ( dev , " clear PF configuration failed %s \n " ,
ice_stat_str ( ret ) ) ;
goto err_init_ctrlq ;
}
if ( pf - > first_sw - > dflt_vsi_ena )
dev_info ( dev , " Clearing default VSI, re-enable after reset completes \n " ) ;
/* clear the default VSI configuration if it exists */
pf - > first_sw - > dflt_vsi = NULL ;
pf - > first_sw - > dflt_vsi_ena = false ;
ice_clear_pxe_mode ( hw ) ;
ret = ice_init_nvm ( hw ) ;
if ( ret ) {
dev_err ( dev , " ice_init_nvm failed %s \n " , ice_stat_str ( ret ) ) ;
goto err_init_ctrlq ;
}
ret = ice_get_caps ( hw ) ;
if ( ret ) {
dev_err ( dev , " ice_get_caps failed %s \n " , ice_stat_str ( ret ) ) ;
goto err_init_ctrlq ;
}
ret = ice_aq_set_mac_cfg ( hw , ICE_AQ_SET_MAC_FRAME_SIZE_MAX , NULL ) ;
if ( ret ) {
dev_err ( dev , " set_mac_cfg failed %s \n " , ice_stat_str ( ret ) ) ;
goto err_init_ctrlq ;
}
err = ice_sched_init_port ( hw - > port_info ) ;
if ( err )
goto err_sched_init_port ;
/* start misc vector */
err = ice_req_irq_msix_misc ( pf ) ;
if ( err ) {
dev_err ( dev , " misc vector setup failed: %d \n " , err ) ;
goto err_sched_init_port ;
}
if ( test_bit ( ICE_FLAG_FD_ENA , pf - > flags ) ) {
wr32 ( hw , PFQF_FD_ENA , PFQF_FD_ENA_FD_ENA_M ) ;
if ( ! rd32 ( hw , PFQF_FD_SIZE ) ) {
u16 unused , guar , b_effort ;
guar = hw - > func_caps . fd_fltr_guar ;
b_effort = hw - > func_caps . fd_fltr_best_effort ;
/* force guaranteed filter pool for PF */
ice_alloc_fd_guar_item ( hw , & unused , guar ) ;
/* force shared filter pool for PF */
ice_alloc_fd_shrd_item ( hw , & unused , b_effort ) ;
}
}
if ( test_bit ( ICE_FLAG_DCB_ENA , pf - > flags ) )
ice_dcb_rebuild ( pf ) ;
/* If the PF previously had enabled PTP, PTP init needs to happen before
* the VSI rebuild . If not , this causes the PTP link status events to
* fail .
*/
if ( test_bit ( ICE_FLAG_PTP_SUPPORTED , pf - > flags ) )
ice_ptp_init ( pf ) ;
/* rebuild PF VSI */
err = ice_vsi_rebuild_by_type ( pf , ICE_VSI_PF ) ;
if ( err ) {
dev_err ( dev , " PF VSI rebuild failed: %d \n " , err ) ;
goto err_vsi_rebuild ;
}
/* If Flow Director is active */
if ( test_bit ( ICE_FLAG_FD_ENA , pf - > flags ) ) {
err = ice_vsi_rebuild_by_type ( pf , ICE_VSI_CTRL ) ;
if ( err ) {
dev_err ( dev , " control VSI rebuild failed: %d \n " , err ) ;
goto err_vsi_rebuild ;
}
/* replay HW Flow Director recipes */
if ( hw - > fdir_prof )
ice_fdir_replay_flows ( hw ) ;
/* replay Flow Director filters */
ice_fdir_replay_fltrs ( pf ) ;
ice_rebuild_arfs ( pf ) ;
}
ice_update_pf_netdev_link ( pf ) ;
/* tell the firmware we are up */
ret = ice_send_version ( pf ) ;
if ( ret ) {
dev_err ( dev , " Rebuild failed due to error sending driver version: %s \n " ,
ice_stat_str ( ret ) ) ;
goto err_vsi_rebuild ;
}
ice_replay_post ( hw ) ;
/* if we get here, reset flow is successful */
clear_bit ( ICE_RESET_FAILED , pf - > state ) ;
ice_plug_aux_dev ( pf ) ;
return ;
err_vsi_rebuild :
err_sched_init_port :
ice_sched_cleanup_all ( hw ) ;
err_init_ctrlq :
ice_shutdown_all_ctrlq ( hw ) ;
set_bit ( ICE_RESET_FAILED , pf - > state ) ;
clear_recovery :
/* set this bit in PF state to control service task scheduling */
set_bit ( ICE_NEEDS_RESTART , pf - > state ) ;
dev_err ( dev , " Rebuild failed, unload and reload driver \n " ) ;
}
/**
* ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
* @ vsi : Pointer to VSI structure
*/
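/* Rationale sketch (a best-effort summary, not taken verbatim from the
* sources): with 2 KB Rx buffers the XDP headroom presumably has to come out
* of the data buffer itself, hence the reduced maximum below, while the 3 KB
* buffers used on 4 KB-page systems without legacy-rx leave the full 3072
* bytes available to the frame.
*/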
static int ice_max_xdp_frame_size ( struct ice_vsi * vsi )
{
if ( PAGE_SIZE > = 8192 | | test_bit ( ICE_FLAG_LEGACY_RX , vsi - > back - > flags ) )
return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM ;
else
return ICE_RXBUF_3072 ;
}
/**
* ice_change_mtu - NDO callback to change the MTU
* @ netdev : network interface device structure
* @ new_mtu : new value for maximum frame size
*
* Returns 0 on success , negative on failure
*/
static int ice_change_mtu ( struct net_device * netdev , int new_mtu )
{
struct ice_netdev_priv * np = netdev_priv ( netdev ) ;
struct ice_vsi * vsi = np - > vsi ;
struct ice_pf * pf = vsi - > back ;
struct iidc_event * event ;
u8 count = 0 ;
int err = 0 ;
if ( new_mtu = = ( int ) netdev - > mtu ) {
netdev_warn ( netdev , " MTU is already %u \n " , netdev - > mtu ) ;
return 0 ;
}
if ( ice_is_xdp_ena_vsi ( vsi ) ) {
int frame_size = ice_max_xdp_frame_size ( vsi ) ;
if ( new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size ) {
netdev_err ( netdev , " max MTU for XDP usage is %d \n " ,
frame_size - ICE_ETH_PKT_HDR_PAD ) ;
return - EINVAL ;
}
}
/* if a reset is in progress, wait for some time for it to complete */
do {
if ( ice_is_reset_in_progress ( pf - > state ) ) {
count + + ;
usleep_range ( 1000 , 2000 ) ;
} else {
break ;
}
} while ( count < 100 ) ;
if ( count = = 100 ) {
netdev_err ( netdev , " can't change MTU. Device is busy \n " ) ;
return - EBUSY ;
}
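/* Notify auxiliary drivers (e.g. the RDMA driver on the aux bus) before and
* after the MTU change so they can quiesce and then resume their use of the
* port.
*/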
event = kzalloc ( sizeof ( * event ) , GFP_KERNEL ) ;
if ( ! event )
return - ENOMEM ;
set_bit ( IIDC_EVENT_BEFORE_MTU_CHANGE , event - > type ) ;
ice_send_event_to_aux ( pf , event ) ;
clear_bit ( IIDC_EVENT_BEFORE_MTU_CHANGE , event - > type ) ;
netdev - > mtu = ( unsigned int ) new_mtu ;
/* if VSI is up, bring it down and then back up */
if ( ! test_and_set_bit ( ICE_VSI_DOWN , vsi - > state ) ) {
err = ice_down ( vsi ) ;
if ( err ) {
netdev_err ( netdev , " change MTU if_down err %d \n " , err ) ;
goto event_after ;
}
err = ice_up ( vsi ) ;
if ( err ) {
netdev_err ( netdev , " change MTU if_up err %d \n " , err ) ;
goto event_after ;
}
}
netdev_dbg ( netdev , " changed MTU to %d \n " , new_mtu ) ;
event_after :
set_bit ( IIDC_EVENT_AFTER_MTU_CHANGE , event - > type ) ;
ice_send_event_to_aux ( pf , event ) ;
kfree ( event ) ;
return err ;
}
/**
* ice_do_ioctl - Access the hwtstamp interface
* @ netdev : network interface device structure
* @ ifr : interface request data
* @ cmd : ioctl command
*/
static int ice_do_ioctl ( struct net_device * netdev , struct ifreq * ifr , int cmd )
{
struct ice_netdev_priv * np = netdev_priv ( netdev ) ;
struct ice_pf * pf = np - > vsi - > back ;
switch ( cmd ) {
case SIOCGHWTSTAMP :
return ice_ptp_get_ts_config ( pf , ifr ) ;
case SIOCSHWTSTAMP :
return ice_ptp_set_ts_config ( pf , ifr ) ;
default :
return - EOPNOTSUPP ;
}
}
/**
* ice_aq_str - convert AQ err code to a string
* @ aq_err : the AQ error code to convert
*/
const char * ice_aq_str ( enum ice_aq_err aq_err )
{
switch ( aq_err ) {
case ICE_AQ_RC_OK :
return " OK " ;
case ICE_AQ_RC_EPERM :
return " ICE_AQ_RC_EPERM " ;
case ICE_AQ_RC_ENOENT :
return " ICE_AQ_RC_ENOENT " ;
case ICE_AQ_RC_ENOMEM :
return " ICE_AQ_RC_ENOMEM " ;
case ICE_AQ_RC_EBUSY :
return " ICE_AQ_RC_EBUSY " ;
case ICE_AQ_RC_EEXIST :
return " ICE_AQ_RC_EEXIST " ;
case ICE_AQ_RC_EINVAL :
return " ICE_AQ_RC_EINVAL " ;
case ICE_AQ_RC_ENOSPC :
return " ICE_AQ_RC_ENOSPC " ;
case ICE_AQ_RC_ENOSYS :
return " ICE_AQ_RC_ENOSYS " ;
case ICE_AQ_RC_EMODE :
return " ICE_AQ_RC_EMODE " ;
case ICE_AQ_RC_ENOSEC :
return " ICE_AQ_RC_ENOSEC " ;
case ICE_AQ_RC_EBADSIG :
return " ICE_AQ_RC_EBADSIG " ;
case ICE_AQ_RC_ESVN :
return " ICE_AQ_RC_ESVN " ;
case ICE_AQ_RC_EBADMAN :
return " ICE_AQ_RC_EBADMAN " ;
case ICE_AQ_RC_EBADBUF :
return " ICE_AQ_RC_EBADBUF " ;
}
return " ICE_AQ_RC_UNKNOWN " ;
}
/**
* ice_stat_str - convert status err code to a string
* @ stat_err : the status error code to convert
*/
const char * ice_stat_str ( enum ice_status stat_err )
{
switch ( stat_err ) {
case ICE_SUCCESS :
return " OK " ;
case ICE_ERR_PARAM :
return " ICE_ERR_PARAM " ;
case ICE_ERR_NOT_IMPL :
return " ICE_ERR_NOT_IMPL " ;
case ICE_ERR_NOT_READY :
return " ICE_ERR_NOT_READY " ;
case ICE_ERR_NOT_SUPPORTED :
return " ICE_ERR_NOT_SUPPORTED " ;
case ICE_ERR_BAD_PTR :
return " ICE_ERR_BAD_PTR " ;
case ICE_ERR_INVAL_SIZE :
return " ICE_ERR_INVAL_SIZE " ;
case ICE_ERR_DEVICE_NOT_SUPPORTED :
return " ICE_ERR_DEVICE_NOT_SUPPORTED " ;
case ICE_ERR_RESET_FAILED :
return " ICE_ERR_RESET_FAILED " ;
case ICE_ERR_FW_API_VER :
return " ICE_ERR_FW_API_VER " ;
case ICE_ERR_NO_MEMORY :
return " ICE_ERR_NO_MEMORY " ;
case ICE_ERR_CFG :
return " ICE_ERR_CFG " ;
case ICE_ERR_OUT_OF_RANGE :
return " ICE_ERR_OUT_OF_RANGE " ;
case ICE_ERR_ALREADY_EXISTS :
return " ICE_ERR_ALREADY_EXISTS " ;
case ICE_ERR_NVM :
return " ICE_ERR_NVM " ;
case ICE_ERR_NVM_CHECKSUM :
return " ICE_ERR_NVM_CHECKSUM " ;
case ICE_ERR_BUF_TOO_SHORT :
return " ICE_ERR_BUF_TOO_SHORT " ;
case ICE_ERR_NVM_BLANK_MODE :
return " ICE_ERR_NVM_BLANK_MODE " ;
case ICE_ERR_IN_USE :
return " ICE_ERR_IN_USE " ;
case ICE_ERR_MAX_LIMIT :
return " ICE_ERR_MAX_LIMIT " ;
case ICE_ERR_RESET_ONGOING :
return " ICE_ERR_RESET_ONGOING " ;
case ICE_ERR_HW_TABLE :
return " ICE_ERR_HW_TABLE " ;
case ICE_ERR_DOES_NOT_EXIST :
return " ICE_ERR_DOES_NOT_EXIST " ;
case ICE_ERR_FW_DDP_MISMATCH :
return " ICE_ERR_FW_DDP_MISMATCH " ;
case ICE_ERR_AQ_ERROR :
return " ICE_ERR_AQ_ERROR " ;
case ICE_ERR_AQ_TIMEOUT :
return " ICE_ERR_AQ_TIMEOUT " ;
case ICE_ERR_AQ_FULL :
return " ICE_ERR_AQ_FULL " ;
case ICE_ERR_AQ_NO_WORK :
return " ICE_ERR_AQ_NO_WORK " ;
case ICE_ERR_AQ_EMPTY :
return " ICE_ERR_AQ_EMPTY " ;
case ICE_ERR_AQ_FW_CRITICAL :
return " ICE_ERR_AQ_FW_CRITICAL " ;
}
return " ICE_ERR_UNKNOWN " ;
}
/**
* ice_set_rss_lut - Set RSS LUT
* @ vsi : Pointer to VSI structure
* @ lut : Lookup table
* @ lut_size : Lookup table size
*
* Returns 0 on success , negative on failure
*/
int ice_set_rss_lut ( struct ice_vsi * vsi , u8 * lut , u16 lut_size )
{
struct ice_aq_get_set_rss_lut_params params = { } ;
struct ice_hw * hw = & vsi - > back - > hw ;
enum ice_status status ;
if ( ! lut )
return - EINVAL ;
params . vsi_handle = vsi - > idx ;
params . lut_size = lut_size ;
params . lut_type = vsi - > rss_lut_type ;
params . lut = lut ;
status = ice_aq_set_rss_lut ( hw , & params ) ;
if ( status ) {
dev_err ( ice_pf_to_dev ( vsi - > back ) , " Cannot set RSS lut, err %s aq_err %s \n " ,
ice_stat_str ( status ) ,
ice_aq_str ( hw - > adminq . sq_last_status ) ) ;
return - EIO ;
}
return 0 ;
}
/**
* ice_set_rss_key - Set RSS key
* @ vsi : Pointer to the VSI structure
* @ seed : RSS hash seed
*
* Returns 0 on success , negative on failure
*/
int ice_set_rss_key ( struct ice_vsi * vsi , u8 * seed )
{
struct ice_hw * hw = & vsi - > back - > hw ;
enum ice_status status ;
if ( ! seed )
return - EINVAL ;
status = ice_aq_set_rss_key ( hw , vsi - > idx , ( struct ice_aqc_get_set_rss_keys * ) seed ) ;
if ( status ) {
dev_err ( ice_pf_to_dev ( vsi - > back ) , " Cannot set RSS key, err %s aq_err %s \n " ,
ice_stat_str ( status ) ,
ice_aq_str ( hw - > adminq . sq_last_status ) ) ;
return - EIO ;
}
return 0 ;
}
/**
* ice_get_rss_lut - Get RSS LUT
* @ vsi : Pointer to VSI structure
* @ lut : Buffer to store the lookup table entries
* @ lut_size : Size of buffer to store the lookup table entries
*
* Returns 0 on success , negative on failure
*/
int ice_get_rss_lut ( struct ice_vsi * vsi , u8 * lut , u16 lut_size )
{
struct ice_aq_get_set_rss_lut_params params = { } ;
struct ice_hw * hw = & vsi - > back - > hw ;
enum ice_status status ;
if ( ! lut )
return - EINVAL ;
params . vsi_handle = vsi - > idx ;
params . lut_size = lut_size ;
params . lut_type = vsi - > rss_lut_type ;
params . lut = lut ;
status = ice_aq_get_rss_lut ( hw , & params ) ;
if ( status ) {
dev_err ( ice_pf_to_dev ( vsi - > back ) , " Cannot get RSS lut, err %s aq_err %s \n " ,
ice_stat_str ( status ) ,
ice_aq_str ( hw - > adminq . sq_last_status ) ) ;
return - EIO ;
}
return 0 ;
}
/**
* ice_get_rss_key - Get RSS key
* @ vsi : Pointer to VSI structure
* @ seed : Buffer to store the key in
*
* Returns 0 on success , negative on failure
*/
int ice_get_rss_key ( struct ice_vsi * vsi , u8 * seed )
{
struct ice_hw * hw = & vsi - > back - > hw ;
enum ice_status status ;
if ( ! seed )
return - EINVAL ;
status = ice_aq_get_rss_key ( hw , vsi - > idx , ( struct ice_aqc_get_set_rss_keys * ) seed ) ;
if ( status ) {
dev_err ( ice_pf_to_dev ( vsi - > back ) , " Cannot get RSS key, err %s aq_err %s \n " ,
ice_stat_str ( status ) ,
ice_aq_str ( hw - > adminq . sq_last_status ) ) ;
return - EIO ;
}
return 0 ;
}
/**
* ice_bridge_getlink - Get the hardware bridge mode
* @ skb : skb buff
* @ pid : process ID
* @ seq : RTNL message seq
* @ dev : the netdev being configured
* @ filter_mask : filter mask passed in
* @ nlflags : netlink flags passed in
*
* Return the bridge mode ( VEB / VEPA )
*/
static int
ice_bridge_getlink ( struct sk_buff * skb , u32 pid , u32 seq ,
struct net_device * dev , u32 filter_mask , int nlflags )
{
struct ice_netdev_priv * np = netdev_priv ( dev ) ;
struct ice_vsi * vsi = np - > vsi ;
struct ice_pf * pf = vsi - > back ;
u16 bmode ;
bmode = pf - > first_sw - > bridge_mode ;
return ndo_dflt_bridge_getlink ( skb , pid , seq , dev , bmode , 0 , 0 , nlflags ,
filter_mask , NULL ) ;
}
/**
* ice_vsi_update_bridge_mode - Update VSI for switching bridge mode ( VEB / VEPA )
* @ vsi : Pointer to VSI structure
* @ bmode : Hardware bridge mode ( VEB / VEPA )
*
* Returns 0 on success , negative on failure
*/
static int ice_vsi_update_bridge_mode ( struct ice_vsi * vsi , u16 bmode )
{
struct ice_aqc_vsi_props * vsi_props ;
struct ice_hw * hw = & vsi - > back - > hw ;
struct ice_vsi_ctx * ctxt ;
enum ice_status status ;
int ret = 0 ;
vsi_props = & vsi - > info ;
ctxt = kzalloc ( sizeof ( * ctxt ) , GFP_KERNEL ) ;
if ( ! ctxt )
return - ENOMEM ;
ctxt - > info = vsi - > info ;
if ( bmode = = BRIDGE_MODE_VEB )
/* change from VEPA to VEB mode */
ctxt - > info . sw_flags | = ICE_AQ_VSI_SW_FLAG_ALLOW_LB ;
else
/* change from VEB to VEPA mode */
ctxt - > info . sw_flags & = ~ ICE_AQ_VSI_SW_FLAG_ALLOW_LB ;
ctxt - > info . valid_sections = cpu_to_le16 ( ICE_AQ_VSI_PROP_SW_VALID ) ;
status = ice_update_vsi ( hw , vsi - > idx , ctxt , NULL ) ;
if ( status ) {
dev_err ( ice_pf_to_dev ( vsi - > back ) , " update VSI for bridge mode failed, bmode = %d err %s aq_err %s \n " ,
bmode , ice_stat_str ( status ) ,
ice_aq_str ( hw - > adminq . sq_last_status ) ) ;
ret = - EIO ;
goto out ;
}
/* Update sw flags for book keeping */
vsi_props - > sw_flags = ctxt - > info . sw_flags ;
out :
kfree ( ctxt ) ;
return ret ;
}
/**
* ice_bridge_setlink - Set the hardware bridge mode
* @ dev : the netdev being configured
* @ nlh : RTNL message
* @ flags : bridge setlink flags
* @ extack : netlink extended ack
*
* Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
* hooked up. Iterates through the PF VSI list and sets the loopback mode (if
* not already set) for all VSIs connected to this switch, and also updates the
* unicast switch filter rules for the corresponding switch of the netdev.
*/
static int
ice_bridge_setlink ( struct net_device * dev , struct nlmsghdr * nlh ,
u16 __always_unused flags ,
struct netlink_ext_ack __always_unused * extack )
{
struct ice_netdev_priv * np = netdev_priv ( dev ) ;
struct ice_pf * pf = np - > vsi - > back ;
struct nlattr * attr , * br_spec ;
struct ice_hw * hw = & pf - > hw ;
enum ice_status status ;
struct ice_sw * pf_sw ;
int rem , v , err = 0 ;
pf_sw = pf - > first_sw ;
/* find the attribute in the netlink message */
br_spec = nlmsg_find_attr ( nlh , sizeof ( struct ifinfomsg ) , IFLA_AF_SPEC ) ;
nla_for_each_nested ( attr , br_spec , rem ) {
__u16 mode ;
if ( nla_type ( attr ) ! = IFLA_BRIDGE_MODE )
continue ;
mode = nla_get_u16 ( attr ) ;
if ( mode ! = BRIDGE_MODE_VEPA & & mode ! = BRIDGE_MODE_VEB )
return - EINVAL ;
/* Continue if bridge mode is not being flipped */
if ( mode = = pf_sw - > bridge_mode )
continue ;
/* Iterates through the PF VSI list and update the loopback
* mode of the VSI
*/
ice_for_each_vsi ( pf , v ) {
if ( ! pf - > vsi [ v ] )
continue ;
err = ice_vsi_update_bridge_mode ( pf - > vsi [ v ] , mode ) ;
if ( err )
return err ;
}
hw - > evb_veb = ( mode = = BRIDGE_MODE_VEB ) ;
/* Update the unicast switch filter rules for the corresponding
* switch of the netdev
*/
status = ice_update_sw_rule_bridge_mode ( hw ) ;
if ( status ) {
netdev_err ( dev , " switch rule update failed, mode = %d err %s aq_err %s \n " ,
mode , ice_stat_str ( status ) ,
ice_aq_str ( hw - > adminq . sq_last_status ) ) ;
/* revert hw->evb_veb */
hw - > evb_veb = ( pf_sw - > bridge_mode = = BRIDGE_MODE_VEB ) ;
return - EIO ;
}
pf_sw - > bridge_mode = mode ;
}
return 0 ;
}
/**
* ice_tx_timeout - Respond to a Tx Hang
* @ netdev : network interface device structure
* @ txqueue : Tx queue
*/
static void ice_tx_timeout ( struct net_device * netdev , unsigned int txqueue )
{
struct ice_netdev_priv * np = netdev_priv ( netdev ) ;
struct ice_ring * tx_ring = NULL ;
struct ice_vsi * vsi = np - > vsi ;
struct ice_pf * pf = vsi - > back ;
u32 i ;
pf - > tx_timeout_count + + ;
/* Check if PFC is enabled for the TC to which the queue belongs. If yes,
* then the Tx timeout is not caused by a hung queue and there is no
* need to reset and rebuild
*/
if ( ice_is_pfc_causing_hung_q ( pf , txqueue ) ) {
dev_info ( ice_pf_to_dev ( pf ) , " Fake Tx hang detected on queue %u, timeout caused by PFC storm \n " ,
txqueue ) ;
return ;
}
/* now that we have an index, find the tx_ring struct */
for ( i = 0 ; i < vsi - > num_txq ; i + + )
if ( vsi - > tx_rings [ i ] & & vsi - > tx_rings [ i ] - > desc )
if ( txqueue = = vsi - > tx_rings [ i ] - > q_index ) {
tx_ring = vsi - > tx_rings [ i ] ;
break ;
}
/* Reset recovery level if enough time has elapsed after last timeout.
* Also ensure no new reset action happens before next timeout period .
*/
if ( time_after ( jiffies , ( pf - > tx_timeout_last_recovery + HZ * 20 ) ) )
pf - > tx_timeout_recovery_level = 1 ;
else if ( time_before ( jiffies , ( pf - > tx_timeout_last_recovery +
netdev - > watchdog_timeo ) ) )
return ;
if ( tx_ring ) {
struct ice_hw * hw = & pf - > hw ;
u32 head , val = 0 ;
head = ( rd32 ( hw , QTX_COMM_HEAD ( vsi - > txq_map [ txqueue ] ) ) &
QTX_COMM_HEAD_HEAD_M ) > > QTX_COMM_HEAD_HEAD_S ;
/* Read interrupt register */
val = rd32 ( hw , GLINT_DYN_CTL ( tx_ring - > q_vector - > reg_idx ) ) ;
netdev_info ( netdev , " tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x \n " ,
vsi - > vsi_num , txqueue , tx_ring - > next_to_clean ,
head , tx_ring - > next_to_use , val ) ;
}
pf - > tx_timeout_last_recovery = jiffies ;
netdev_info ( netdev , " tx_timeout recovery level %d, txqueue %u \n " ,
pf - > tx_timeout_recovery_level , txqueue ) ;
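/* Escalate recovery with each successive timeout: level 1 requests a PF
* reset, level 2 a core reset, level 3 a global reset; beyond that the
* device is declared unrecoverable and taken down.
*/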
switch ( pf - > tx_timeout_recovery_level ) {
case 1 :
set_bit ( ICE_PFR_REQ , pf - > state ) ;
break ;
case 2 :
set_bit ( ICE_CORER_REQ , pf - > state ) ;
break ;
case 3 :
set_bit ( ICE_GLOBR_REQ , pf - > state ) ;
break ;
default :
netdev_err ( netdev , " tx_timeout recovery unsuccessful, device is in unrecoverable state. \n " ) ;
set_bit ( ICE_DOWN , pf - > state ) ;
set_bit ( ICE_VSI_NEEDS_RESTART , vsi - > state ) ;
set_bit ( ICE_SERVICE_DIS , pf - > state ) ;
break ;
}
ice_service_task_schedule ( pf ) ;
pf - > tx_timeout_recovery_level + + ;
}
/**
* ice_open - Called when a network interface becomes active
* @ netdev : network interface device structure
*
* The open entry point is called when a network interface is made
* active by the system ( IFF_UP ) . At this point all resources needed
* for transmit and receive operations are allocated , the interrupt
* handler is registered with the OS , the netdev watchdog is enabled ,
* and the stack is notified that the interface is ready .
*
* Returns 0 on success , negative value on failure
*/
int ice_open ( struct net_device * netdev )
{
struct ice_netdev_priv * np = netdev_priv ( netdev ) ;
struct ice_pf * pf = np - > vsi - > back ;
if ( ice_is_reset_in_progress ( pf - > state ) ) {
netdev_err ( netdev , " can't open net device while reset is in progress " ) ;
return - EBUSY ;
}
return ice_open_internal ( netdev ) ;
}
/**
* ice_open_internal - Called when a network interface becomes active
* @ netdev : network interface device structure
*
* Internal ice_open implementation. Should not be used directly except by
* ice_open and the reset handling routine.
*
* Returns 0 on success , negative value on failure
*/
int ice_open_internal ( struct net_device * netdev )
{
struct ice_netdev_priv * np = netdev_priv ( netdev ) ;
struct ice_vsi * vsi = np - > vsi ;
struct ice_pf * pf = vsi - > back ;
struct ice_port_info * pi ;
enum ice_status status ;
int err ;
if ( test_bit ( ICE_NEEDS_RESTART , pf - > state ) ) {
netdev_err ( netdev , " driver needs to be unloaded and reloaded \n " ) ;
return - EIO ;
}
netif_carrier_off ( netdev ) ;
pi = vsi - > port_info ;
status = ice_update_link_info ( pi ) ;
if ( status ) {
netdev_err ( netdev , " Failed to get link info, error %s \n " ,
ice_stat_str ( status ) ) ;
return - EIO ;
}
ice_check_module_power ( pf , pi - > phy . link_info . link_cfg_err ) ;
/* Set PHY if there is media, otherwise, turn off PHY */
if ( pi - > phy . link_info . link_info & ICE_AQ_MEDIA_AVAILABLE ) {
clear_bit ( ICE_FLAG_NO_MEDIA , pf - > flags ) ;
if ( ! test_bit ( ICE_PHY_INIT_COMPLETE , pf - > state ) ) {
err = ice_init_phy_user_cfg ( pi ) ;
if ( err ) {
netdev_err ( netdev , " Failed to initialize PHY settings, error %d \n " ,
err ) ;
return err ;
}
}
err = ice_configure_phy ( vsi ) ;
if ( err ) {
netdev_err ( netdev , " Failed to set physical link up, error %d \n " ,
err ) ;
return err ;
}
} else {
set_bit ( ICE_FLAG_NO_MEDIA , pf - > flags ) ;
ice_set_link ( vsi , false ) ;
}
err = ice_vsi_open ( vsi ) ;
if ( err )
netdev_err ( netdev , " Failed to open VSI 0x%04X on switch 0x%04X \n " ,
vsi - > vsi_num , vsi - > vsw - > sw_id ) ;
/* Update existing tunnels information */
udp_tunnel_get_rx_info ( netdev ) ;
return err ;
}
/**
* ice_stop - Disables a network interface
* @ netdev : network interface device structure
*
* The stop entry point is called when an interface is de - activated by the OS ,
* and the netdevice enters the DOWN state . The hardware is still under the
* driver ' s control , but the netdev interface is disabled .
*
* Returns success only - not allowed to fail
*/
int ice_stop ( struct net_device * netdev )
{
struct ice_netdev_priv * np = netdev_priv ( netdev ) ;
struct ice_vsi * vsi = np - > vsi ;
struct ice_pf * pf = vsi - > back ;
if ( ice_is_reset_in_progress ( pf - > state ) ) {
netdev_err ( netdev , " can't stop net device while reset is in progress " ) ;
return - EBUSY ;
}
ice_vsi_close ( vsi ) ;
return 0 ;
}
/**
* ice_features_check - Validate encapsulated packet conforms to limits
* @ skb : skb buffer
* @ netdev : This port ' s netdev
* @ features : Offload features that the stack believes apply
*/
static netdev_features_t
ice_features_check ( struct sk_buff * skb ,
struct net_device __always_unused * netdev ,
netdev_features_t features )
{
size_t len ;
/* No point in doing any of this if neither checksum nor GSO are
* being requested for this frame . We can rule out both by just
* checking for CHECKSUM_PARTIAL
*/
if ( skb - > ip_summed ! = CHECKSUM_PARTIAL )
return features ;
/* We cannot support GSO if the MSS is going to be less than
* 64 bytes . If it is then we need to drop support for GSO .
*/
if ( skb_is_gso ( skb ) & & ( skb_shinfo ( skb ) - > gso_size < 64 ) )
features & = ~ NETIF_F_GSO_MASK ;
len = skb_network_header ( skb ) - skb - > data ;
if ( len > ICE_TXD_MACLEN_MAX | | len & 0x1 )
goto out_rm_features ;
len = skb_transport_header ( skb ) - skb_network_header ( skb ) ;
if ( len > ICE_TXD_IPLEN_MAX | | len & 0x1 )
goto out_rm_features ;
if ( skb - > encapsulation ) {
len = skb_inner_network_header ( skb ) - skb_transport_header ( skb ) ;
if ( len > ICE_TXD_L4LEN_MAX | | len & 0x1 )
goto out_rm_features ;
len = skb_inner_transport_header ( skb ) -
skb_inner_network_header ( skb ) ;
if ( len > ICE_TXD_IPLEN_MAX | | len & 0x1 )
goto out_rm_features ;
}
return features ;
out_rm_features :
return features & ~ ( NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK ) ;
}
static const struct net_device_ops ice_netdev_safe_mode_ops = {
. ndo_open = ice_open ,
. ndo_stop = ice_stop ,
. ndo_start_xmit = ice_start_xmit ,
. ndo_set_mac_address = ice_set_mac_address ,
. ndo_validate_addr = eth_validate_addr ,
. ndo_change_mtu = ice_change_mtu ,
. ndo_get_stats64 = ice_get_stats64 ,
. ndo_tx_timeout = ice_tx_timeout ,
. ndo_bpf = ice_xdp_safe_mode ,
} ;
static const struct net_device_ops ice_netdev_ops = {
. ndo_open = ice_open ,
. ndo_stop = ice_stop ,
. ndo_start_xmit = ice_start_xmit ,
. ndo_features_check = ice_features_check ,
. ndo_set_rx_mode = ice_set_rx_mode ,
. ndo_set_mac_address = ice_set_mac_address ,
. ndo_validate_addr = eth_validate_addr ,
. ndo_change_mtu = ice_change_mtu ,
. ndo_get_stats64 = ice_get_stats64 ,
. ndo_set_tx_maxrate = ice_set_tx_maxrate ,
. ndo_do_ioctl = ice_do_ioctl ,
. ndo_set_vf_spoofchk = ice_set_vf_spoofchk ,
. ndo_set_vf_mac = ice_set_vf_mac ,
. ndo_get_vf_config = ice_get_vf_cfg ,
. ndo_set_vf_trust = ice_set_vf_trust ,
. ndo_set_vf_vlan = ice_set_vf_port_vlan ,
. ndo_set_vf_link_state = ice_set_vf_link_state ,
. ndo_get_vf_stats = ice_get_vf_stats ,
. ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid ,
. ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid ,
. ndo_set_features = ice_set_features ,
. ndo_bridge_getlink = ice_bridge_getlink ,
. ndo_bridge_setlink = ice_bridge_setlink ,
. ndo_fdb_add = ice_fdb_add ,
. ndo_fdb_del = ice_fdb_del ,
# ifdef CONFIG_RFS_ACCEL
. ndo_rx_flow_steer = ice_rx_flow_steer ,
# endif
. ndo_tx_timeout = ice_tx_timeout ,
. ndo_bpf = ice_xdp ,
. ndo_xdp_xmit = ice_xdp_xmit ,
. ndo_xsk_wakeup = ice_xsk_wakeup ,
} ;