/*
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_vblank.h>
#include <drm/drm_managed.h>
#include "amdgpu_drv.h"

#include <drm/drm_pciids.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_probe_helper.h>
#include <linux/mmu_notifier.h>

#include "amdgpu.h"
#include "amdgpu_irq.h"
#include "amdgpu_dma_buf.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_ras.h"
/*
 * KMS wrapper.
 * - 3.0.0 - initial driver
 * - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP)
 * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same
 *           at the end of IBs.
 * - 3.3.0 - Add VM support for UVD on supported hardware.
 * - 3.4.0 - Add AMDGPU_INFO_NUM_EVICTIONS.
 * - 3.5.0 - Add support for new UVD_NO_OP register.
 * - 3.6.0 - kmd now uses CONTEXT_CONTROL in the ring buffer.
 * - 3.7.0 - Add support for VCE clock list packet
 * - 3.8.0 - Add support for raster config init in the kernel
 * - 3.9.0 - Add support for memory query info about VRAM and GTT.
 * - 3.10.0 - Add support for new fences ioctl, new gem ioctl flags
 * - 3.11.0 - Add support for sensor query info (clocks, temp, etc).
 * - 3.12.0 - Add query for double offchip LDS buffers
 * - 3.13.0 - Add PRT support
 * - 3.14.0 - Fix race in amdgpu_ctx_get_fence() and note new functionality
 * - 3.15.0 - Export more gpu info for gfx9
 * - 3.16.0 - Add reserved vmid support
 * - 3.17.0 - Add AMDGPU_NUM_VRAM_CPU_PAGE_FAULTS.
 * - 3.18.0 - Export gpu always on cu bitmap
 * - 3.19.0 - Add support for UVD MJPEG decode
 * - 3.20.0 - Add support for local BOs
 * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
 * - 3.22.0 - Add DRM_AMDGPU_SCHED ioctl
 * - 3.23.0 - Add query for VRAM lost counter
 * - 3.24.0 - Add high priority compute support for gfx9
 * - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
 * - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE.
 * - 3.27.0 - Add new chunk to AMDGPU_CS to enable BO_LIST creation.
 * - 3.28.0 - Add AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES
 * - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID
 * - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE.
 * - 3.31.0 - Add support for per-flip tiling attribute changes with DC
 * - 3.32.0 - Add syncobj timeline support to AMDGPU_CS.
 * - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS.
 * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
 * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
 * - 3.36.0 - Allow reading more status registers on si/cik
 * - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness
 * - 3.38.0 - Add AMDGPU_IB_FLAG_EMIT_MEM_SYNC
 * - 3.39.0 - DMABUF implicit sync does a full pipeline sync
 * - 3.40.0 - Add AMDGPU_IDS_FLAGS_TMZ
 */
#define KMS_DRIVER_MAJOR	3
#define KMS_DRIVER_MINOR	40
#define KMS_DRIVER_PATCHLEVEL	0
int amdgpu_vram_limit = 0;
int amdgpu_vis_vram_limit = 0;
int amdgpu_gart_size = -1; /* auto */
int amdgpu_gtt_size = -1; /* auto */
int amdgpu_moverate = -1; /* auto */
int amdgpu_benchmarking = 0;
int amdgpu_testing = 0;
int amdgpu_audio = -1;
int amdgpu_disp_priority = 0;
int amdgpu_hw_i2c = 0;
int amdgpu_pcie_gen2 = -1;
int amdgpu_msi = -1;
char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
int amdgpu_dpm = -1;
int amdgpu_fw_load_type = -1;
int amdgpu_aspm = -1;
int amdgpu_runtime_pm = -1;
uint amdgpu_ip_block_mask = 0xffffffff;
int amdgpu_bapm = -1;
int amdgpu_deep_color = 0;
int amdgpu_vm_size = -1;
int amdgpu_vm_fragment_size = -1;
int amdgpu_vm_block_size = -1;
int amdgpu_vm_fault_stop = 0;
int amdgpu_vm_debug = 0;
int amdgpu_vm_update_mode = -1;
int amdgpu_exp_hw_support = 0;
int amdgpu_dc = -1;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
uint amdgpu_pcie_gen_cap = 0;
uint amdgpu_pcie_lane_cap = 0;
uint amdgpu_cg_mask = 0xffffffff;
uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
/* OverDrive (bit 14) disabled by default */
uint amdgpu_pp_feature_mask = 0xffffbfff;
uint amdgpu_force_long_training = 0;
int amdgpu_job_hang_limit = 0;
int amdgpu_lbpw = -1;
int amdgpu_compute_multipipe = -1;
int amdgpu_gpu_recovery = -1; /* auto */
int amdgpu_emu_mode = 0;
uint amdgpu_smu_memory_pool_size = 0;
/* FBC (bit 0) disabled by default */
uint amdgpu_dc_feature_mask = 0;
uint amdgpu_dc_debug_mask = 0;
int amdgpu_async_gfx_ring = 1;
int amdgpu_mcbp = 0;
int amdgpu_discovery = -1;
int amdgpu_mes = 0;
int amdgpu_noretry = -1;
int amdgpu_force_asic_type = -1;
int amdgpu_tmz = 0;
int amdgpu_reset_method = -1; /* auto */
int amdgpu_num_kcq = -1;

struct amdgpu_mgpu_info mgpu_info = {
	.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
};
int amdgpu_ras_enable = -1;
uint amdgpu_ras_mask = 0xffffffff;
int amdgpu_bad_page_threshold = -1;
/**
 * DOC: vramlimit (int)
 * Restrict the total amount of VRAM in MiB for testing. The default is 0 (Use full VRAM).
 */
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
/**
 * DOC: vis_vramlimit (int)
 * Restrict the amount of CPU visible VRAM in MiB for testing. The default is 0 (Use full CPU visible VRAM).
 */
MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes");
module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444);
/**
 * DOC: gartsize (uint)
 * Restrict the size of GART in MiB (32, 64, etc.) for testing. The default is -1 (The size depends on asic).
 */
MODULE_PARM_DESC(gartsize, "Size of GART to setup in megabytes (32, 64, etc., -1=auto)");
module_param_named(gartsize, amdgpu_gart_size, uint, 0600);
/**
 * DOC: gttsize (int)
 * Restrict the size of the GTT domain in MiB for testing. The default is -1 (It's VRAM size if 3GB < VRAM < 3/4 RAM,
 * otherwise 3/4 RAM size).
 */
MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)");
module_param_named(gttsize, amdgpu_gtt_size, int, 0600);
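/*
 * Illustrative application of the default rule above (the numbers are
 * hypothetical): with 8 GiB of VRAM and 32 GiB of system RAM, 3 GiB <
 * 8 GiB < 24 GiB (3/4 of RAM), so the GTT domain defaults to the VRAM
 * size, 8 GiB; with only 2 GiB of VRAM the default would instead be
 * 3/4 of RAM, i.e. 24 GiB.
 */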
/**
 * DOC: moverate (int)
 * Set maximum buffer migration rate in MB/s. The default is -1 (8 MB/s).
 */
MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1=auto, 0=1=disabled)");
module_param_named(moverate, amdgpu_moverate, int, 0600);
/**
 * DOC: benchmark (int)
 * Run benchmarks. The default is 0 (Skip benchmarks).
 */
MODULE_PARM_DESC(benchmark, "Run benchmark");
module_param_named(benchmark, amdgpu_benchmarking, int, 0444);
/**
 * DOC: test (int)
 * Test BO GTT->VRAM and VRAM->GTT GPU copies. The default is 0 (Skip test, only set 1 to run test).
 */
MODULE_PARM_DESC(test, "Run tests");
module_param_named(test, amdgpu_testing, int, 0444);
/**
 * DOC: audio (int)
 * Set HDMI/DP audio. Only affects non-DC display handling. The default is -1 (Enabled), set 0 to disable it.
 */
MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(audio, amdgpu_audio, int, 0444);
/**
 * DOC: disp_priority (int)
 * Set display Priority (1 = normal, 2 = high). Only affects non-DC display handling. The default is 0 (auto).
 */
MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
module_param_named(disp_priority, amdgpu_disp_priority, int, 0444);
/**
 * DOC: hw_i2c (int)
 * To enable the hw i2c engine. Only affects non-DC display handling. The default is 0 (Disabled).
 */
MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
module_param_named(hw_i2c, amdgpu_hw_i2c, int, 0444);
/**
 * DOC: pcie_gen2 (int)
 * To disable PCIE Gen2/3 mode (0 = disable, 1 = enable). The default is -1 (auto, enabled).
 */
MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);
/**
 * DOC: msi (int)
 * To disable Message Signaled Interrupts (MSI) functionality (1 = enable, 0 = disable). The default is -1 (auto, enabled).
 */
MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(msi, amdgpu_msi, int, 0444);
/**
 * DOC: lockup_timeout (string)
 * Set GPU scheduler timeout value in ms.
 *
 * The format can be [Non-Compute] or [GFX,Compute,SDMA,Video]. That is, one or
 * multiple values can be specified. 0 and negative values are invalid; they
 * will be adjusted to the default timeout.
 *
 * - With one value specified, the setting will apply to all non-compute jobs.
 * - With multiple values specified, the first one will be for GFX.
 *   The second one is for Compute. The third and fourth ones are
 *   for SDMA and Video.
 *
 * By default (with no lockup_timeout settings), the timeout for all non-compute (GFX, SDMA and Video)
 * jobs is 10000, and there is no timeout enforced on compute jobs.
 */
MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and infinity timeout for compute jobs; "
		 "for passthrough or sriov, 10000 for all jobs. "
		 "0: keep default value. negative: infinity timeout), "
		 "format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
		 "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
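/*
 * Illustrative command-line forms of the format described above (the
 * timeout values themselves are made up):
 *
 *   amdgpu.lockup_timeout=10000                    one value, applied to
 *                                                  all non-compute queues
 *   amdgpu.lockup_timeout=10000,60000,10000,10000  per-type values in the
 *                                                  order GFX, Compute,
 *                                                  SDMA, Video
 */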
/**
 * DOC: dpm (int)
 * Override for dynamic power management setting
 * (0 = disable, 1 = enable, 2 = enable sw smu driver for vega20)
 * The default is -1 (auto).
 */
MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(dpm, amdgpu_dpm, int, 0444);
/**
 * DOC: fw_load_type (int)
 * Set different firmware loading type for debugging (0 = direct, 1 = SMU, 2 = PSP). The default is -1 (auto).
 */
MODULE_PARM_DESC(fw_load_type, "firmware loading type (0 = direct, 1 = SMU, 2 = PSP, -1 = auto)");
module_param_named(fw_load_type, amdgpu_fw_load_type, int, 0444);
/**
 * DOC: aspm (int)
 * To disable ASPM (1 = enable, 0 = disable). The default is -1 (auto, enabled).
 */
MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(aspm, amdgpu_aspm, int, 0444);
/**
 * DOC: runpm (int)
 * Override for runtime power management control for dGPUs in PX/HG laptops. The amdgpu driver can dynamically power down
 * the dGPU on PX/HG laptops when it is idle. The default is -1 (auto enable). Setting the value to 0 disables this functionality.
 */
MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
/**
 * DOC: ip_block_mask (uint)
 * Override what IP blocks are enabled on the GPU. Each GPU is a collection of IP blocks (gfx, display, video, etc.).
 * Use this parameter to disable specific blocks. Note that the IP blocks do not have a fixed index. Some asics may not have
 * some IPs or may include multiple instances of an IP, so the ordering varies from asic to asic. See the driver output in
 * the kernel log for the list of IPs on the asic. The default is 0xffffffff (enable all blocks on a device).
 */
MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))");
module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
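/*
 * Worked example (the index is hypothetical, since the IP ordering differs
 * from asic to asic): if the kernel log lists the block to be disabled at
 * index 2, a mask of 0xfffffffb (all bits set except bit 2) disables only
 * that block.
 */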
/**
 * DOC: bapm (int)
 * Bidirectional Application Power Management (BAPM) is used to dynamically share TDP between CPU and GPU. Set value 0 to disable it.
 * The default is -1 (auto, enabled).
 */
MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(bapm, amdgpu_bapm, int, 0444);
/**
 * DOC: deep_color (int)
 * Set 1 to enable Deep Color support. Only affects non-DC display handling. The default is 0 (disabled).
 */
MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
module_param_named(deep_color, amdgpu_deep_color, int, 0444);
/**
 * DOC: vm_size (int)
 * Override the size of the GPU's per client virtual address space in GiB. The default is -1 (automatic for each asic).
 */
MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 64GB)");
module_param_named(vm_size, amdgpu_vm_size, int, 0444);
/**
 * DOC: vm_fragment_size (int)
 * Override VM fragment size in bits (4, 5, etc. 4 = 64K, 9 = 2M). The default is -1 (automatic for each asic).
 */
MODULE_PARM_DESC(vm_fragment_size, "VM fragment size in bits (4, 5, etc. 4 = 64K (default), Max 9 = 2M)");
module_param_named(vm_fragment_size, amdgpu_vm_fragment_size, int, 0444);
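/*
 * Interpretation note (inferred from the 4 = 64K / 9 = 2M examples above,
 * assuming 4 KiB pages): a value of N selects fragments of 2^N pages, so
 * 4 -> 16 pages = 64 KiB and 9 -> 512 pages = 2 MiB.
 */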
/**
 * DOC: vm_block_size (int)
 * Override VM page table size in bits (default depending on vm_size and hw setup). The default is -1 (automatic for each asic).
 */
MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);
/**
 * DOC: vm_fault_stop (int)
 * Stop on VM fault for debugging (0 = never, 1 = print first, 2 = always). The default is 0 (No stop).
 */
MODULE_PARM_DESC(vm_fault_stop, "Stop on VM fault (0 = never (default), 1 = print first, 2 = always)");
module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
/**
 * DOC: vm_debug (int)
 * Debug VM handling (0 = disabled, 1 = enabled). The default is 0 (Disabled).
 */
MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
/**
 * DOC: vm_update_mode (int)
 * Override VM update mode. VM updated by using CPU (0 = never, 1 = Graphics only, 2 = Compute only, 3 = Both). The default
 * is -1 (Only in large BAR (LB) systems will Compute VM tables be updated by the CPU, otherwise 0, never).
 */
MODULE_PARM_DESC(vm_update_mode, "VM update using CPU (0 = never (default except for large BAR(LB)), 1 = Graphics only, 2 = Compute only (default for LB), 3 = Both)");
module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444);
/**
 * DOC: exp_hw_support (int)
 * Enable experimental hw support (1 = enable). The default is 0 (disabled).
 */
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
/**
 * DOC: dc (int)
 * Disable/Enable the Display Core driver for debugging (1 = enable, 0 = disable). The default is -1 (automatic for each asic).
 */
MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
module_param_named(dc, amdgpu_dc, int, 0444);
/**
 * DOC: sched_jobs (int)
 * Override the max number of jobs supported in the sw queue. The default is 32.
 */
MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
/**
 * DOC: sched_hw_submission (int)
 * Override the max number of HW submissions. The default is 2.
 */
MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
/**
 * DOC: ppfeaturemask (hexint)
 * Override the power features enabled. See enum PP_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
 * The default is the current set of stable power features.
 */
MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default)");
module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, hexint, 0444);
/**
 * DOC: forcelongtraining (uint)
 * Force long memory training in resume.
 * The default is zero, which indicates short training in resume.
 */
MODULE_PARM_DESC(forcelongtraining, "force memory long training");
module_param_named(forcelongtraining, amdgpu_force_long_training, uint, 0444);
/**
 * DOC: pcie_gen_cap (uint)
 * Override PCIE gen speed capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
 * The default is 0 (automatic for each asic).
 */
MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
/**
 * DOC: pcie_lane_cap (uint)
 * Override PCIE lane capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
 * The default is 0 (automatic for each asic).
 */
MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
/**
 * DOC: cg_mask (uint)
 * Override the Clockgating features enabled on the GPU (0 = disable clock gating). See the AMD_CG_SUPPORT flags in
 * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffff (all enabled).
 */
MODULE_PARM_DESC(cg_mask, "Clockgating flags mask (0 = disable clock gating)");
module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444);
/**
 * DOC: pg_mask (uint)
 * Override the Powergating features enabled on the GPU (0 = disable power gating). See the AMD_PG_SUPPORT flags in
 * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffff (all enabled).
 */
MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)");
module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
/**
 * DOC: sdma_phase_quantum (uint)
 * Override the SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change). The default is 32.
 */
MODULE_PARM_DESC(sdma_phase_quantum, "SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change (default 32))");
module_param_named(sdma_phase_quantum, amdgpu_sdma_phase_quantum, uint, 0444);
/**
 * DOC: disable_cu (charp)
 * Set to disable CUs (It's set like se.sh.cu,...). The default is NULL.
 */
MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
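/*
 * Example of the se.sh.cu list format above, reading the fields as
 * shader-engine.shader-array.compute-unit indices (the particular values
 * are arbitrary): amdgpu.disable_cu=0.0.3,1.0.3 disables CU 3 in shader
 * array 0 of shader engines 0 and 1.
 */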
/**
 * DOC: virtual_display (charp)
 * Set to enable the virtual display feature. This feature provides virtual display hardware on headless boards
 * or in virtualized environments. It will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x. It's the pci address of
 * the device, plus the number of crtcs to expose. E.g., 0000:26:00.0,4 would enable 4 virtual crtcs on the pci
 * device at 26:00.0. The default is NULL.
 */
MODULE_PARM_DESC(virtual_display,
		 "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)");
module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
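/*
 * Extending the example above, two devices can be listed using the
 * semicolon separator from the format string (the second address is
 * hypothetical): amdgpu.virtual_display=0000:26:00.0,4;0000:03:00.0,2
 */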
/**
 * DOC: job_hang_limit (int)
 * Set how much time to allow a job to hang without dropping it. The default is 0.
 */
MODULE_PARM_DESC(job_hang_limit, "how much time allow a job hang and not drop it (default 0)");
module_param_named(job_hang_limit, amdgpu_job_hang_limit, int, 0444);
/**
 * DOC: lbpw (int)
 * Override Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable). The default is -1 (auto, enabled).
 */
MODULE_PARM_DESC(lbpw, "Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(lbpw, amdgpu_lbpw, int, 0444);
MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)");
module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
/**
 * DOC: gpu_recovery (int)
 * Set to enable the GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
 */
MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
/**
 * DOC: emu_mode (int)
 * Set value 1 to enable emulation mode. This is only needed when running on an emulator. The default is 0 (disabled).
 */
MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);
/**
 * DOC: ras_enable (int)
 * Enable RAS features on the GPU (0 = disable, 1 = enable, -1 = auto (default))
 */
MODULE_PARM_DESC(ras_enable, "Enable RAS features on the GPU (0 = disable, 1 = enable, -1 = auto (default))");
module_param_named(ras_enable, amdgpu_ras_enable, int, 0444);
/**
 * DOC: ras_mask (uint)
 * Mask of RAS features to enable (default 0xffffffff), only valid when ras_enable == 1
 * See the flags in drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
 */
MODULE_PARM_DESC(ras_mask, "Mask of RAS features to enable (default 0xffffffff), only valid when ras_enable == 1");
module_param_named(ras_mask, amdgpu_ras_mask, uint, 0444);
/**
 * DOC: si_support (int)
 * Set the SI support driver. This parameter only takes effect when CONFIG_DRM_AMDGPU_SI is set. For SI asics, when the radeon
 * driver is enabled, set value 0 to use the radeon driver, or set value 1 to use the amdgpu driver. The default is to use the
 * radeon driver when it is available, otherwise the amdgpu driver.
 */
#ifdef CONFIG_DRM_AMDGPU_SI
#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
int amdgpu_si_support = 0;
MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))");
#else
int amdgpu_si_support = 1;
MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)");
#endif
module_param_named(si_support, amdgpu_si_support, int, 0444);
#endif
/**
 * DOC: cik_support (int)
 * Set the CIK support driver. This parameter only takes effect when CONFIG_DRM_AMDGPU_CIK is set. For CIK asics, when the radeon
 * driver is enabled, set value 0 to use the radeon driver, or set value 1 to use the amdgpu driver. The default is to use the
 * radeon driver when it is available, otherwise the amdgpu driver.
 */
#ifdef CONFIG_DRM_AMDGPU_CIK
#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
int amdgpu_cik_support = 0;
MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))");
#else
int amdgpu_cik_support = 1;
MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)");
#endif
module_param_named(cik_support, amdgpu_cik_support, int, 0444);
#endif
/**
 * DOC: smu_memory_pool_size (uint)
 * Reserve GTT for SMU debug usage; set value 0 to disable it. The actual size reserved is value * 256 MiB.
 * E.g. 0x1 = 256 MiB, 0x2 = 512 MiB, 0x4 = 1 GiB, 0x8 = 2 GiB. The default is 0 (disabled).
 */
MODULE_PARM_DESC(smu_memory_pool_size,
		 "reserve gtt for smu debug usage, 0 = disable, "
		 "0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte");
module_param_named(smu_memory_pool_size, amdgpu_smu_memory_pool_size, uint, 0444);
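/*
 * Worked example of the value * 256 MiB rule above:
 * amdgpu.smu_memory_pool_size=0x4 reserves 4 * 256 MiB = 1 GiB of GTT for
 * SMU debug use, while the default of 0 reserves nothing.
 */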
/**
 * DOC: async_gfx_ring (int)
 * Enable gfx rings that can be configured with different priorities or with equal priorities.
 */
MODULE_PARM_DESC(async_gfx_ring,
		 "Asynchronous GFX rings that could be configured with either different priorities (HP3D ring and LP3D ring), or equal priorities (0 = disabled, 1 = enabled (default))");
module_param_named(async_gfx_ring, amdgpu_async_gfx_ring, int, 0444);
/**
 * DOC: mcbp (int)
 * Enable mid command buffer preemption. (0 = disabled (default), 1 = enabled)
 */
MODULE_PARM_DESC(mcbp,
		 "Enable Mid-command buffer preemption (0 = disabled (default), 1 = enabled)");
module_param_named(mcbp, amdgpu_mcbp, int, 0444);
/**
 * DOC: discovery (int)
 * Allow the driver to discover hardware IP information from the IP Discovery table at the top of VRAM.
 * (-1 = auto (default), 0 = disabled, 1 = enabled)
 */
MODULE_PARM_DESC(discovery,
		 "Allow driver to discover hardware IPs from IP Discovery table at the top of VRAM");
module_param_named(discovery, amdgpu_discovery, int, 0444);
/**
 * DOC: mes (int)
 * Enable Micro Engine Scheduler. This is a new hw scheduling engine for gfx, sdma, and compute.
 * (0 = disabled (default), 1 = enabled)
 */
MODULE_PARM_DESC(mes,
		 "Enable Micro Engine Scheduler (0 = disabled (default), 1 = enabled)");
module_param_named(mes, amdgpu_mes, int, 0444);
/**
 * DOC: noretry (int)
 * Disable retry faults in the GPU memory controller.
 * (0 = retry enabled, 1 = retry disabled, -1 auto (default))
 */
MODULE_PARM_DESC(noretry,
		 "Disable retry faults (0 = retry enabled, 1 = retry disabled, -1 auto (default))");
module_param_named(noretry, amdgpu_noretry, int, 0644);
/**
 * DOC: force_asic_type (int)
 * A non-negative value used to specify the asic type for all supported GPUs.
 */
MODULE_PARM_DESC(force_asic_type,
		 "A non negative value used to specify the asic type for all supported GPUs");
module_param_named(force_asic_type, amdgpu_force_asic_type, int, 0444);
#ifdef CONFIG_HSA_AMD
/**
 * DOC: sched_policy (int)
 * Set scheduling policy. Default is HWS (hardware scheduling) with over-subscription.
 * Setting 1 disables over-subscription. Setting 2 disables HWS and statically
 * assigns queues to HQDs.
 */
int sched_policy = KFD_SCHED_POLICY_HWS;
module_param(sched_policy, int, 0444);
MODULE_PARM_DESC(sched_policy,
	"Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only))");
/**
 * DOC: hws_max_conc_proc (int)
 * Maximum number of processes that HWS can schedule concurrently. The maximum is the
 * number of VMIDs assigned to the HWS, which is also the default.
 */
int hws_max_conc_proc = 8;
module_param(hws_max_conc_proc, int, 0444);
MODULE_PARM_DESC(hws_max_conc_proc,
	"Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))");
/**
 * DOC: cwsr_enable (int)
 * CWSR (compute wave store and resume) allows the GPU to preempt shader execution in
 * the middle of a compute wave. Default is 1 to enable this feature. Setting 0
 * disables it.
 */
int cwsr_enable = 1;
module_param(cwsr_enable, int, 0444);
MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = Off, 1 = On (Default))");
/**
 * DOC: max_num_of_queues_per_device (int)
 * Maximum number of queues per device. Valid setting is between 1 and 4096. Default
 * is 4096.
 */
int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
module_param(max_num_of_queues_per_device, int, 0444);
MODULE_PARM_DESC(max_num_of_queues_per_device,
	"Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
/**
 * DOC: send_sigterm (int)
 * Send sigterm to HSA process on unhandled exceptions. Default is not to send sigterm
 * but just print errors on dmesg. Setting 1 enables sending sigterm.
 */
int send_sigterm;
module_param(send_sigterm, int, 0444);
MODULE_PARM_DESC(send_sigterm,
	"Send sigterm to HSA process on unhandled exception (0 = disable, 1 = enable)");
/**
 * DOC: debug_largebar (int)
 * Set debug_largebar as 1 to enable simulating large-bar capability on a non-large-bar
 * system. This limits the VRAM size reported to ROCm applications to the visible
 * size, usually 256MB.
 * Default value is 0, disabled.
 */
int debug_largebar;
module_param(debug_largebar, int, 0444);
MODULE_PARM_DESC(debug_largebar,
	"Debug large-bar flag used to simulate large-bar capability on non-large bar machine (0 = disable, 1 = enable)");
/**
 * DOC: ignore_crat (int)
 * Ignore CRAT table during KFD initialization. By default, KFD uses the ACPI CRAT
 * table to get information about AMD APUs. This option can serve as a workaround on
 * systems with a broken CRAT table.
 *
 * Default is auto (according to asic type, iommu_v2, and crat table, to decide
 * whether to use CRAT).
 */
int ignore_crat;
module_param(ignore_crat, int, 0444);
MODULE_PARM_DESC(ignore_crat,
	"Ignore CRAT table during KFD initialization (0 = auto (default), 1 = ignore CRAT)");
/**
 * DOC: halt_if_hws_hang (int)
 * Halt if HWS hang is detected. Default value, 0, disables the halt on hang.
 * Setting 1 enables halt on hang.
 */
int halt_if_hws_hang;
module_param(halt_if_hws_hang, int, 0644);
MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
/**
 * DOC: hws_gws_support (bool)
 * Assume that HWS supports GWS barriers regardless of what the firmware version
 * check says. Default value: false (rely on MEC2 firmware version check).
 */
bool hws_gws_support;
module_param(hws_gws_support, bool, 0444);
MODULE_PARM_DESC(hws_gws_support, "Assume MEC2 FW supports GWS barriers (false = rely on FW version check (Default), true = force supported)");
/**
 * DOC: queue_preemption_timeout_ms (int)
 * queue preemption timeout in ms (1 = Minimum, 9000 = default)
 */
int queue_preemption_timeout_ms = 9000;
module_param(queue_preemption_timeout_ms, int, 0644);
MODULE_PARM_DESC(queue_preemption_timeout_ms, "queue preemption timeout in ms (1 = Minimum, 9000 = default)");
/**
 * DOC: debug_evictions (bool)
 * Enable extra debug messages to help determine the cause of evictions
 */
bool debug_evictions;
module_param(debug_evictions, bool, 0644);
MODULE_PARM_DESC(debug_evictions, "enable eviction debug messages (false = default)");
/**
 * DOC: no_system_mem_limit (bool)
 * Disable the system memory limit, to support multiple-process shared memory
 */
bool no_system_mem_limit;
module_param(no_system_mem_limit, bool, 0644);
MODULE_PARM_DESC(no_system_mem_limit, "disable system memory limit (false = default)");
#endif
/**
 * DOC: dcfeaturemask (uint)
 * Override the display features enabled. See enum DC_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
 * The default is the current set of stable display features.
 */
MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default)");
module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
/**
 * DOC: dcdebugmask (uint)
 * Override the display debug options enabled. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
 */
MODULE_PARM_DESC(dcdebugmask, "all debug options disabled (default)");
module_param_named(dcdebugmask, amdgpu_dc_debug_mask, uint, 0444);
/**
 * DOC: abmlevel (uint)
 * Override the default ABM (Adaptive Backlight Management) level used for DC
 * enabled hardware. Requires DMCU to be supported and loaded.
 * Valid levels are 0-4. A value of 0 indicates that ABM should be disabled by
 * default. Values 1-4 control the maximum allowable brightness reduction via
 * the ABM algorithm, with 1 being the least reduction and 4 being the most
 * reduction.
 *
 * Defaults to 0, or disabled. Userspace can still override this level later
 * after boot.
 */
uint amdgpu_dm_abm_level = 0;
MODULE_PARM_DESC(abmlevel, "ABM level (0 = off (default), 1-4 = backlight reduction level)");
module_param_named(abmlevel, amdgpu_dm_abm_level, uint, 0444);
int amdgpu_backlight = -1;
MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (default))");
module_param_named(backlight, amdgpu_backlight, bint, 0444);
/**
 * DOC: tmz (int)
 * Trusted Memory Zone (TMZ) is a method to protect data being written
 * to or read from memory.
 *
 * The default value: 0 (off). TODO: change to auto till it is completed.
 */
MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto, 0 = off (default), 1 = on)");
module_param_named(tmz, amdgpu_tmz, int, 0444);
/**
 * DOC: reset_method (int)
 * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
 */
MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)");
module_param_named(reset_method, amdgpu_reset_method, int, 0444);
/**
 * DOC: bad_page_threshold (int)
 * The bad page threshold specifies the maximum number of faulty pages
 * detected by RAS ECC; if the total number of faulty pages exceeds this
 * threshold, the GPU may enter a bad state, which is left for the user's
 * further check.
 */
MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold (-1 = auto (default value), 0 = disable bad page retirement)");
module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444);

MODULE_PARM_DESC(num_kcq, "number of kernel compute queues to set up (8 if set to greater than 8 or less than 0, only affects gfx 8+)");
module_param_named(num_kcq, amdgpu_num_kcq, int, 0444);
static const struct pci_device_id pciidlist [ ] = {
# ifdef CONFIG_DRM_AMDGPU_SI
{ 0x1002 , 0x6780 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TAHITI } ,
{ 0x1002 , 0x6784 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TAHITI } ,
{ 0x1002 , 0x6788 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TAHITI } ,
{ 0x1002 , 0x678A , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TAHITI } ,
{ 0x1002 , 0x6790 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TAHITI } ,
{ 0x1002 , 0x6791 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TAHITI } ,
{ 0x1002 , 0x6792 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TAHITI } ,
{ 0x1002 , 0x6798 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TAHITI } ,
{ 0x1002 , 0x6799 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TAHITI } ,
{ 0x1002 , 0x679A , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TAHITI } ,
{ 0x1002 , 0x679B , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TAHITI } ,
{ 0x1002 , 0x679E , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TAHITI } ,
{ 0x1002 , 0x679F , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TAHITI } ,
{ 0x1002 , 0x6800 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_PITCAIRN | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6801 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_PITCAIRN | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6802 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_PITCAIRN | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6806 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_PITCAIRN } ,
{ 0x1002 , 0x6808 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_PITCAIRN } ,
{ 0x1002 , 0x6809 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_PITCAIRN } ,
{ 0x1002 , 0x6810 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_PITCAIRN } ,
{ 0x1002 , 0x6811 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_PITCAIRN } ,
{ 0x1002 , 0x6816 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_PITCAIRN } ,
{ 0x1002 , 0x6817 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_PITCAIRN } ,
{ 0x1002 , 0x6818 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_PITCAIRN } ,
{ 0x1002 , 0x6819 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_PITCAIRN } ,
{ 0x1002 , 0x6600 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_OLAND | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6601 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_OLAND | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6602 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_OLAND | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6603 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_OLAND | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6604 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_OLAND | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6605 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_OLAND | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6606 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_OLAND | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6607 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_OLAND | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6608 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_OLAND } ,
{ 0x1002 , 0x6610 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_OLAND } ,
{ 0x1002 , 0x6611 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_OLAND } ,
{ 0x1002 , 0x6613 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_OLAND } ,
{ 0x1002 , 0x6617 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_OLAND | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6620 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_OLAND | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6621 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_OLAND | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6623 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_OLAND | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6631 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_OLAND } ,
{ 0x1002 , 0x6820 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VERDE | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6821 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VERDE | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6822 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VERDE | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6823 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VERDE | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6824 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VERDE | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6825 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VERDE | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6826 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VERDE | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6827 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VERDE | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6828 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VERDE } ,
{ 0x1002 , 0x6829 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VERDE } ,
{ 0x1002 , 0x682A , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VERDE | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x682B , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VERDE | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x682C , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VERDE } ,
{ 0x1002 , 0x682D , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VERDE | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x682F , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VERDE | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6830 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VERDE | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6831 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VERDE | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6835 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VERDE } ,
{ 0x1002 , 0x6837 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VERDE } ,
{ 0x1002 , 0x6838 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VERDE } ,
{ 0x1002 , 0x6839 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VERDE } ,
{ 0x1002 , 0x683B , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VERDE } ,
{ 0x1002 , 0x683D , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VERDE } ,
{ 0x1002 , 0x683F , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VERDE } ,
{ 0x1002 , 0x6660 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_HAINAN | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6663 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_HAINAN | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6664 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_HAINAN | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6665 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_HAINAN | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6667 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_HAINAN | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x666F , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_HAINAN | AMD_IS_MOBILITY } ,
# endif
# ifdef CONFIG_DRM_AMDGPU_CIK
/* Kaveri */
{ 0x1002 , 0x1304 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KAVERI | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x1305 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KAVERI | AMD_IS_APU } ,
{ 0x1002 , 0x1306 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KAVERI | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x1307 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KAVERI | AMD_IS_APU } ,
{ 0x1002 , 0x1309 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KAVERI | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x130A , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KAVERI | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x130B , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KAVERI | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x130C , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KAVERI | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x130D , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KAVERI | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x130E , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KAVERI | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x130F , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KAVERI | AMD_IS_APU } ,
{ 0x1002 , 0x1310 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KAVERI | AMD_IS_APU } ,
{ 0x1002 , 0x1311 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KAVERI | AMD_IS_APU } ,
{ 0x1002 , 0x1312 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KAVERI | AMD_IS_APU } ,
{ 0x1002 , 0x1313 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KAVERI | AMD_IS_APU } ,
{ 0x1002 , 0x1315 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KAVERI | AMD_IS_APU } ,
{ 0x1002 , 0x1316 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KAVERI | AMD_IS_APU } ,
{ 0x1002 , 0x1317 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KAVERI | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x1318 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KAVERI | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x131B , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KAVERI | AMD_IS_APU } ,
{ 0x1002 , 0x131C , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KAVERI | AMD_IS_APU } ,
{ 0x1002 , 0x131D , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KAVERI | AMD_IS_APU } ,
/* Bonaire */
{ 0x1002 , 0x6640 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_BONAIRE | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6641 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_BONAIRE | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6646 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_BONAIRE | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6647 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_BONAIRE | AMD_IS_MOBILITY } ,
{ 0x1002 , 0x6649 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_BONAIRE } ,
{ 0x1002 , 0x6650 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_BONAIRE } ,
{ 0x1002 , 0x6651 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_BONAIRE } ,
{ 0x1002 , 0x6658 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_BONAIRE } ,
{ 0x1002 , 0x665c , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_BONAIRE } ,
{ 0x1002 , 0x665d , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_BONAIRE } ,
{ 0x1002 , 0x665f , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_BONAIRE } ,
/* Hawaii */
{ 0x1002 , 0x67A0 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_HAWAII } ,
{ 0x1002 , 0x67A1 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_HAWAII } ,
{ 0x1002 , 0x67A2 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_HAWAII } ,
{ 0x1002 , 0x67A8 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_HAWAII } ,
{ 0x1002 , 0x67A9 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_HAWAII } ,
{ 0x1002 , 0x67AA , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_HAWAII } ,
{ 0x1002 , 0x67B0 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_HAWAII } ,
{ 0x1002 , 0x67B1 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_HAWAII } ,
{ 0x1002 , 0x67B8 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_HAWAII } ,
{ 0x1002 , 0x67B9 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_HAWAII } ,
{ 0x1002 , 0x67BA , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_HAWAII } ,
{ 0x1002 , 0x67BE , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_HAWAII } ,
/* Kabini */
{ 0x1002 , 0x9830 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KABINI | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x9831 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KABINI | AMD_IS_APU } ,
{ 0x1002 , 0x9832 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KABINI | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x9833 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KABINI | AMD_IS_APU } ,
{ 0x1002 , 0x9834 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KABINI | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x9835 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KABINI | AMD_IS_APU } ,
{ 0x1002 , 0x9836 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KABINI | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x9837 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KABINI | AMD_IS_APU } ,
{ 0x1002 , 0x9838 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KABINI | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x9839 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KABINI | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x983a , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KABINI | AMD_IS_APU } ,
{ 0x1002 , 0x983b , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KABINI | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x983c , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KABINI | AMD_IS_APU } ,
{ 0x1002 , 0x983d , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KABINI | AMD_IS_APU } ,
{ 0x1002 , 0x983e , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KABINI | AMD_IS_APU } ,
{ 0x1002 , 0x983f , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_KABINI | AMD_IS_APU } ,
/* mullins */
{ 0x1002 , 0x9850 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_MULLINS | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x9851 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_MULLINS | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x9852 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_MULLINS | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x9853 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_MULLINS | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x9854 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_MULLINS | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x9855 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_MULLINS | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x9856 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_MULLINS | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x9857 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_MULLINS | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x9858 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_MULLINS | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x9859 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_MULLINS | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x985A , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_MULLINS | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x985B , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_MULLINS | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x985C , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_MULLINS | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x985D , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_MULLINS | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x985E , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_MULLINS | AMD_IS_MOBILITY | AMD_IS_APU } ,
{ 0x1002 , 0x985F , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_MULLINS | AMD_IS_MOBILITY | AMD_IS_APU } ,
# endif
/* topaz */
{ 0x1002 , 0x6900 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TOPAZ } ,
{ 0x1002 , 0x6901 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TOPAZ } ,
{ 0x1002 , 0x6902 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TOPAZ } ,
{ 0x1002 , 0x6903 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TOPAZ } ,
{ 0x1002 , 0x6907 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TOPAZ } ,
/* tonga */
{ 0x1002 , 0x6920 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TONGA } ,
{ 0x1002 , 0x6921 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TONGA } ,
{ 0x1002 , 0x6928 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TONGA } ,
{ 0x1002 , 0x6929 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TONGA } ,
{ 0x1002 , 0x692B , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TONGA } ,
{ 0x1002 , 0x692F , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TONGA } ,
{ 0x1002 , 0x6930 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TONGA } ,
{ 0x1002 , 0x6938 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TONGA } ,
{ 0x1002 , 0x6939 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_TONGA } ,
/* fiji */
{ 0x1002 , 0x7300 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_FIJI } ,
{ 0x1002 , 0x730F , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_FIJI } ,
/* carrizo */
{ 0x1002 , 0x9870 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_CARRIZO | AMD_IS_APU } ,
{ 0x1002 , 0x9874 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_CARRIZO | AMD_IS_APU } ,
{ 0x1002 , 0x9875 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_CARRIZO | AMD_IS_APU } ,
{ 0x1002 , 0x9876 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_CARRIZO | AMD_IS_APU } ,
{ 0x1002 , 0x9877 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_CARRIZO | AMD_IS_APU } ,
/* stoney */
{ 0x1002 , 0x98E4 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_STONEY | AMD_IS_APU } ,
/* Polaris11 */
{ 0x1002 , 0x67E0 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS11 } ,
{ 0x1002 , 0x67E3 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS11 } ,
{ 0x1002 , 0x67E8 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS11 } ,
{ 0x1002 , 0x67EB , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS11 } ,
{ 0x1002 , 0x67EF , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS11 } ,
{ 0x1002 , 0x67FF , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS11 } ,
{ 0x1002 , 0x67E1 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS11 } ,
{ 0x1002 , 0x67E7 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS11 } ,
{ 0x1002 , 0x67E9 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS11 } ,
/* Polaris10 */
{ 0x1002 , 0x67C0 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS10 } ,
{ 0x1002 , 0x67C1 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS10 } ,
{ 0x1002 , 0x67C2 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS10 } ,
{ 0x1002 , 0x67C4 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS10 } ,
{ 0x1002 , 0x67C7 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS10 } ,
{ 0x1002 , 0x67D0 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS10 } ,
{ 0x1002 , 0x67DF , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS10 } ,
{ 0x1002 , 0x67C8 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS10 } ,
{ 0x1002 , 0x67C9 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS10 } ,
{ 0x1002 , 0x67CA , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS10 } ,
{ 0x1002 , 0x67CC , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS10 } ,
{ 0x1002 , 0x67CF , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS10 } ,
{ 0x1002 , 0x6FDF , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS10 } ,
/* Polaris12 */
{ 0x1002 , 0x6980 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS12 } ,
{ 0x1002 , 0x6981 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS12 } ,
{ 0x1002 , 0x6985 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS12 } ,
{ 0x1002 , 0x6986 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS12 } ,
{ 0x1002 , 0x6987 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS12 } ,
{ 0x1002 , 0x6995 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS12 } ,
{ 0x1002 , 0x6997 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS12 } ,
{ 0x1002 , 0x699F , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_POLARIS12 } ,
/* VEGAM */
{ 0x1002 , 0x694C , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VEGAM } ,
{ 0x1002 , 0x694E , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VEGAM } ,
{ 0x1002 , 0x694F , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VEGAM } ,
/* Vega 10 */
{ 0x1002 , 0x6860 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VEGA10 } ,
{ 0x1002 , 0x6861 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VEGA10 } ,
{ 0x1002 , 0x6862 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VEGA10 } ,
{ 0x1002 , 0x6863 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VEGA10 } ,
{ 0x1002 , 0x6864 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VEGA10 } ,
{ 0x1002 , 0x6867 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VEGA10 } ,
{ 0x1002 , 0x6868 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VEGA10 } ,
{ 0x1002 , 0x6869 , PCI_ANY_ID , PCI_ANY_ID , 0 , 0 , CHIP_VEGA10 } ,
	{0x1002, 0x686a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x686b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x686d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x686e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x686f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	/* Vega 12 */
	{0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
	{0x1002, 0x69A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
	{0x1002, 0x69A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
	{0x1002, 0x69A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
	{0x1002, 0x69AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
	/* Vega 20 */
	{0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
	{0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
	{0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
	{0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
	{0x1002, 0x66A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
	{0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
	{0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
	/* Raven */
	{0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
	{0x1002, 0x15d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
	/* Arcturus */
	{0x1002, 0x738C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
	{0x1002, 0x7388, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
	{0x1002, 0x738E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
	{0x1002, 0x7390, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
	/* Navi10 */
	{0x1002, 0x7310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
	{0x1002, 0x7312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
	{0x1002, 0x7318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
	{0x1002, 0x7319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
	{0x1002, 0x731A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
	{0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
	{0x1002, 0x731E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
	{0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
	/* Navi14 */
	{0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
	{0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
	{0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
	{0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
	/* Renoir */
	{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
	{0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
	{0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
	/* Navi12 */
	{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
	{0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
	/* Sienna_Cichlid */
	{0x1002, 0x73A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
	{0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
	{0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
	{0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
	{0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
	{0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
	{0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},

	{0, 0, 0}
};

MODULE_DEVICE_TABLE(pci, pciidlist);
static struct drm_driver kms_driver;
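/**
 * amdgpu_pci_probe - PCI device probe callback
 * @pdev: PCI device structure
 * @ent: matching entry from pciidlist
 *
 * Checks whether the matched ASIC should really be driven by amdgpu
 * (experimental hardware opt-in, SME vs. Raven, optional SI/CIK hand-off
 * to radeon), removes conflicting framebuffers, allocates the
 * amdgpu_device, loads the KMS driver and registers the DRM device,
 * retrying registration a few times when it returns -EAGAIN.
 *
 * Returns 0 on success or a negative error code on failure.
 */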
static int amdgpu_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct drm_device *ddev;
	struct amdgpu_device *adev;
	unsigned long flags = ent->driver_data;
	int ret, retry = 0;
	bool supports_atomic = false;

	if (!amdgpu_virtual_display &&
	    amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
		supports_atomic = true;

	if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
		DRM_INFO("This hardware requires experimental hardware support.\n"
			 "See modparam exp_hw_support\n");
		return -ENODEV;
	}

	/* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping,
	 * however, SME requires an indirect IOMMU mapping because the encryption
	 * bit is beyond the DMA mask of the chip.
	 */
	if (mem_encrypt_active() && ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
		dev_info(&pdev->dev,
			 "SME is not compatible with RAVEN\n");
		return -ENOTSUPP;
	}

#ifdef CONFIG_DRM_AMDGPU_SI
	if (!amdgpu_si_support) {
		switch (flags & AMD_ASIC_MASK) {
		case CHIP_TAHITI:
		case CHIP_PITCAIRN:
		case CHIP_VERDE:
		case CHIP_OLAND:
		case CHIP_HAINAN:
			dev_info(&pdev->dev,
				 "SI support provided by radeon.\n");
			dev_info(&pdev->dev,
				 "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n");
			return -ENODEV;
		}
	}
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	if (!amdgpu_cik_support) {
		switch (flags & AMD_ASIC_MASK) {
		case CHIP_KAVERI:
		case CHIP_BONAIRE:
		case CHIP_HAWAII:
		case CHIP_KABINI:
		case CHIP_MULLINS:
			dev_info(&pdev->dev,
				 "CIK support provided by radeon.\n");
			dev_info(&pdev->dev,
				 "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n");
			return -ENODEV;
		}
	}
#endif

	/* Get rid of things like offb */
	ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "amdgpudrmfb");
	if (ret)
		return ret;

	adev = devm_drm_dev_alloc(&pdev->dev, &kms_driver, typeof(*adev), ddev);
	if (IS_ERR(adev))
		return PTR_ERR(adev);

	adev->dev = &pdev->dev;
	adev->pdev = pdev;
	ddev = adev_to_drm(adev);

	if (!supports_atomic)
		ddev->driver_features &= ~DRIVER_ATOMIC;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	ddev->pdev = pdev;
	pci_set_drvdata(pdev, ddev);

	ret = amdgpu_driver_load_kms(adev, ent->driver_data);
	if (ret)
		goto err_pci;

retry_init:
	ret = drm_dev_register(ddev, ent->driver_data);
	if (ret == -EAGAIN && ++retry <= 3) {
		DRM_INFO("retry init %d\n", retry);
		/* Don't request EX mode too frequently;
		 * the hypervisor treats overly frequent requests as an attack.
		 */
		msleep(5000);
		goto retry_init;
	} else if (ret) {
		goto err_pci;
	}

	ret = amdgpu_debugfs_init(adev);
	if (ret)
		DRM_ERROR("Creating debugfs files failed (%d).\n", ret);

	return 0;

err_pci:
	pci_disable_device(pdev);
	return ret;
}
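/**
 * amdgpu_pci_remove - PCI device removal callback
 * @pdev: PCI device structure
 *
 * Unplugs the DRM device and tears down the KMS state. Physical hotplug
 * removal is not supported; the error below fires whenever the device
 * goes away for any reason other than module unload.
 */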
static void
amdgpu_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

#ifdef MODULE
	if (THIS_MODULE->state != MODULE_STATE_GOING)
#endif
		DRM_ERROR("Hotplug removal is not supported\n");
	drm_dev_unplug(dev);
	amdgpu_driver_unload_kms(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
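/**
 * amdgpu_pci_shutdown - PCI shutdown callback
 * @pdev: PCI device structure
 *
 * Suspends the IP blocks on reboot/shutdown so the device is handed over
 * in a sane state (important when the GPU is passed through to a VM), and
 * tells the SMU an unload is coming unless we run as a passthrough device.
 * Skipped when a RAS fatal error interrupt has already fired.
 */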
static void
amdgpu_pci_shutdown(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (amdgpu_ras_intr_triggered())
		return;

	/* If we are running in a VM, make sure the device
	 * is torn down properly on reboot/shutdown.
	 * Unfortunately we can't detect certain
	 * hypervisors, so just do this all the time.
	 */
	if (!amdgpu_passthrough(adev))
		adev->mp1_state = PP_MP1_STATE_UNLOAD;
	amdgpu_device_ip_suspend(adev);
	adev->mp1_state = PP_MP1_STATE_NONE;
}
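/*
 * System sleep dev_pm_ops handlers: suspend/resume handle ordinary
 * suspend-to-RAM, while freeze/thaw/poweroff/restore handle hibernation.
 * freeze additionally marks the device as hibernating for the duration of
 * the suspend and resets the ASIC once the state has been saved.
 */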
static int amdgpu_pmops_suspend(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	return amdgpu_device_suspend(drm_dev, true);
}

static int amdgpu_pmops_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	return amdgpu_device_resume(drm_dev, true);
}

static int amdgpu_pmops_freeze(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);
	int r;

	adev->in_hibernate = true;
	r = amdgpu_device_suspend(drm_dev, true);
	adev->in_hibernate = false;
	if (r)
		return r;

	return amdgpu_asic_reset(adev);
}

static int amdgpu_pmops_thaw(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	return amdgpu_device_resume(drm_dev, true);
}

static int amdgpu_pmops_poweroff(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	return amdgpu_device_suspend(drm_dev, true);
}

static int amdgpu_pmops_restore(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	return amdgpu_device_resume(drm_dev, true);
}
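/**
 * amdgpu_pmops_runtime_suspend - runtime PM suspend callback
 * @dev: generic device structure
 *
 * Waits for all rings to drain, then powers the GPU down through the
 * platform-appropriate mechanism: ATPX/_PR3 power control (BOCO), with the
 * PCI state handled in the driver only for legacy ATPX, or BACO otherwise.
 */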
static int amdgpu_pmops_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);
	int ret, i;

	if (!adev->runpm) {
		pm_runtime_forbid(dev);
		return -EBUSY;
	}

	/* wait for all rings to drain before suspending */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring && ring->sched.ready) {
			ret = amdgpu_fence_wait_empty(ring);
			if (ret)
				return -EBUSY;
		}
	}

	adev->in_runpm = true;
	if (amdgpu_device_supports_boco(drm_dev))
		drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
	drm_kms_helper_poll_disable(drm_dev);

	ret = amdgpu_device_suspend(drm_dev, false);
	if (ret)
		return ret;

	if (amdgpu_device_supports_boco(drm_dev)) {
		/* Only need to handle PCI state in the driver for ATPX,
		 * PCI core handles it for _PR3.
		 */
		if (amdgpu_is_atpx_hybrid()) {
			pci_ignore_hotplug(pdev);
		} else {
			amdgpu_device_cache_pci_state(pdev);
			pci_disable_device(pdev);
			pci_ignore_hotplug(pdev);
			pci_set_power_state(pdev, PCI_D3cold);
		}
		drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
	} else if (amdgpu_device_supports_baco(drm_dev)) {
		amdgpu_device_baco_enter(drm_dev);
	}

	return 0;
}
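/**
 * amdgpu_pmops_runtime_resume - runtime PM resume callback
 * @dev: generic device structure
 *
 * Mirror of runtime suspend: re-enables the PCI device for legacy ATPX
 * platforms (the PCI core handles _PR3), exits BACO where that was used,
 * then resumes the device and re-enables output polling.
 */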
static int amdgpu_pmops_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);
	int ret;

	if (!adev->runpm)
		return -EINVAL;

	if (amdgpu_device_supports_boco(drm_dev)) {
		drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		/* Only need to handle PCI state in the driver for ATPX,
		 * PCI core handles it for _PR3.
		 */
		if (amdgpu_is_atpx_hybrid()) {
			pci_set_master(pdev);
		} else {
			pci_set_power_state(pdev, PCI_D0);
			amdgpu_device_load_pci_state(pdev);
			ret = pci_enable_device(pdev);
			if (ret)
				return ret;
			pci_set_master(pdev);
		}
	} else if (amdgpu_device_supports_baco(drm_dev)) {
		amdgpu_device_baco_exit(drm_dev);
	}

	ret = amdgpu_device_resume(drm_dev, false);
	drm_kms_helper_poll_enable(drm_dev);
	if (amdgpu_device_supports_boco(drm_dev))
		drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
	adev->in_runpm = false;

	return 0;
}
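/**
 * amdgpu_pmops_runtime_idle - runtime PM idle callback
 * @dev: generic device structure
 *
 * Returns -EBUSY while any CRTC is active (DC) or any connector is DPMS-on
 * (non-DC) so the GPU stays powered while outputs are lit; otherwise
 * returns a positive value so the PM core does not suspend directly and
 * the autosuspend timer armed below is used instead.
 */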
static int amdgpu_pmops_runtime_idle(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);
	/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
	int ret = 1;

	if (!adev->runpm) {
		pm_runtime_forbid(dev);
		return -EBUSY;
	}

	if (amdgpu_device_has_dc_support(adev)) {
		struct drm_crtc *crtc;

		drm_modeset_lock_all(drm_dev);

		drm_for_each_crtc(crtc, drm_dev) {
			if (crtc->state->active) {
				ret = -EBUSY;
				break;
			}
		}

		drm_modeset_unlock_all(drm_dev);
	} else {
		struct drm_connector *list_connector;
		struct drm_connector_list_iter iter;

		mutex_lock(&drm_dev->mode_config.mutex);
		drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);

		drm_connector_list_iter_begin(drm_dev, &iter);
		drm_for_each_connector_iter(list_connector, &iter) {
			if (list_connector->dpms == DRM_MODE_DPMS_ON) {
				ret = -EBUSY;
				break;
			}
		}

		drm_connector_list_iter_end(&iter);

		drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
		mutex_unlock(&drm_dev->mode_config.mutex);
	}

	if (ret == -EBUSY)
		DRM_DEBUG_DRIVER("failing to power off - crtc active\n");

	pm_runtime_mark_last_busy(dev);
	pm_runtime_autosuspend(dev);
	return ret;
}
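/**
 * amdgpu_drm_ioctl - wrapper around drm_ioctl for runtime PM
 * @filp: file pointer
 * @cmd: ioctl command
 * @arg: ioctl argument
 *
 * Takes a runtime PM reference around the generic drm_ioctl() dispatch so
 * the GPU is powered up for the duration of every ioctl, and refreshes the
 * autosuspend timer afterwards.
 */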
long amdgpu_drm_ioctl(struct file *filp,
		      unsigned int cmd, unsigned long arg)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev;
	long ret;

	dev = file_priv->minor->dev;
	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	ret = drm_ioctl(filp, cmd, arg);

	pm_runtime_mark_last_busy(dev->dev);
out:
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}
static const struct dev_pm_ops amdgpu_pm_ops = {
	.suspend = amdgpu_pmops_suspend,
	.resume = amdgpu_pmops_resume,
	.freeze = amdgpu_pmops_freeze,
	.thaw = amdgpu_pmops_thaw,
	.poweroff = amdgpu_pmops_poweroff,
	.restore = amdgpu_pmops_restore,
	.runtime_suspend = amdgpu_pmops_runtime_suspend,
	.runtime_resume = amdgpu_pmops_runtime_resume,
	.runtime_idle = amdgpu_pmops_runtime_idle,
};
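/**
 * amdgpu_flush - file close flush callback
 * @f: file pointer
 * @id: owner of the flushed file
 *
 * Drains the per-file context scheduler entities and waits for the file's
 * VM to go idle before the file descriptor is torn down.
 */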
static int amdgpu_flush(struct file *f, fl_owner_t id)
{
	struct drm_file *file_priv = f->private_data;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	long timeout = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;

	timeout = amdgpu_ctx_mgr_entity_flush(&fpriv->ctx_mgr, timeout);
	timeout = amdgpu_vm_wait_idle(&fpriv->vm, timeout);

	return timeout >= 0 ? 0 : timeout;
}
static const struct file_operations amdgpu_driver_kms_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.flush = amdgpu_flush,
	.release = drm_release,
	.unlocked_ioctl = amdgpu_drm_ioctl,
	.mmap = amdgpu_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = amdgpu_kms_compat_ioctl,
#endif
};
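/**
 * amdgpu_file_to_fpriv - look up the amdgpu_fpriv behind a struct file
 * @filp: file pointer
 * @fpriv: returned driver-private data
 *
 * Rejects files that were not opened through amdgpu's own fops, then hands
 * back the amdgpu_fpriv attached to the DRM file. Useful for callers that
 * only hold a struct file reference.
 */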
int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
{
	struct drm_file *file;

	if (!filp)
		return -EINVAL;

	if (filp->f_op != &amdgpu_driver_kms_fops)
		return -EINVAL;

	file = filp->private_data;
	*fpriv = file->driver_priv;
	return 0;
}
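/*
 * DRM driver description for the KMS device: atomic modesetting, GEM,
 * render nodes and both binary and timeline sync objects, with PRIME
 * import/export wired to the amdgpu GEM helpers.
 */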
static struct drm_driver kms_driver = {
	.driver_features =
	    DRIVER_ATOMIC |
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE,
	.open = amdgpu_driver_open_kms,
	.postclose = amdgpu_driver_postclose_kms,
	.lastclose = amdgpu_driver_lastclose_kms,
	.irq_handler = amdgpu_irq_handler,
	.ioctls = amdgpu_ioctls_kms,
	.gem_free_object_unlocked = amdgpu_gem_object_free,
	.gem_open_object = amdgpu_gem_object_open,
	.gem_close_object = amdgpu_gem_object_close,
	.dumb_create = amdgpu_mode_dumb_create,
	.dumb_map_offset = amdgpu_mode_dumb_mmap,
	.fops = &amdgpu_driver_kms_fops,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = amdgpu_gem_prime_export,
	.gem_prime_import = amdgpu_gem_prime_import,
	.gem_prime_vmap = amdgpu_gem_prime_vmap,
	.gem_prime_vunmap = amdgpu_gem_prime_vunmap,
	.gem_prime_mmap = amdgpu_gem_prime_mmap,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = KMS_DRIVER_MAJOR,
	.minor = KMS_DRIVER_MINOR,
	.patchlevel = KMS_DRIVER_PATCHLEVEL,
};
static struct pci_error_handlers amdgpu_pci_err_handler = {
	.error_detected	= amdgpu_pci_error_detected,
	.mmio_enabled	= amdgpu_pci_mmio_enabled,
	.slot_reset	= amdgpu_pci_slot_reset,
	.resume		= amdgpu_pci_resume,
};

static struct pci_driver amdgpu_kms_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = amdgpu_pci_probe,
	.remove = amdgpu_pci_remove,
	.shutdown = amdgpu_pci_shutdown,
	.driver.pm = &amdgpu_pm_ops,
	.err_handler = &amdgpu_pci_err_handler,
};
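/*
 * Module init: bails out when the VGA console has forced text mode,
 * initializes the sync and fence slabs, registers the ATPX handler and
 * (optionally) the KFD, then registers the PCI driver above.
 *
 * Experimental ASICs in pciidlist are only bound when the corresponding
 * module parameter is set, e.g. (assuming the usual module name)
 * "modprobe amdgpu exp_hw_support=1".
 */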
static int __init amdgpu_init(void)
{
	int r;

	if (vgacon_text_force()) {
		DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
		return -EINVAL;
	}

	r = amdgpu_sync_init();
	if (r)
		goto error_sync;

	r = amdgpu_fence_slab_init();
	if (r)
		goto error_fence;

	DRM_INFO("amdgpu kernel modesetting enabled.\n");
	kms_driver.num_ioctls = amdgpu_max_kms_ioctl;
	amdgpu_register_atpx_handler();

	/* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */
	amdgpu_amdkfd_init();

	/* let modprobe override vga console setting */
	return pci_register_driver(&amdgpu_kms_pci_driver);

error_fence:
	amdgpu_sync_fini();

error_sync:
	return r;
}
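/*
 * Module exit: unwinds init in reverse order and waits for any outstanding
 * MMU notifiers before the module text goes away.
 */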
static void __exit amdgpu_exit(void)
{
	amdgpu_amdkfd_fini();
	pci_unregister_driver(&amdgpu_kms_pci_driver);
	amdgpu_unregister_atpx_handler();
	amdgpu_sync_fini();
	amdgpu_fence_slab_fini();
	mmu_notifier_synchronize();
}

module_init(amdgpu_init);
module_exit(amdgpu_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");