Mirror of https://github.com/Qortal/Brooklyn.git (synced 2025-01-30 14:52:17 +00:00)

Commit a80f575781 ("phase 8"), parent b9d9e26075
@@ -247,7 +247,7 @@ static void synth_flush(struct spk_synth *synth)
 static int synth_probe(struct spk_synth *synth)
 {
        unsigned int port_val = 0;
-       int i = 0;
+       int i;

        pr_info("Probing for %s.\n", synth->long_name);
        if (port_forced) {
@@ -316,7 +316,7 @@ static struct synth_settings *synth_interrogate(struct spk_synth *synth)
 static int synth_probe(struct spk_synth *synth)
 {
        unsigned int port_val = 0;
-       int i = 0;
+       int i;
        struct synth_settings *sp;

        pr_info("Probing for DoubleTalk.\n");
@@ -254,7 +254,7 @@ static void synth_flush(struct spk_synth *synth)
 static int synth_probe(struct spk_synth *synth)
 {
        unsigned int port_val = 0;
-       int i = 0;
+       int i;

        pr_info("Probing for %s.\n", synth->long_name);
        if (port_forced) {
@@ -88,7 +88,7 @@ static int spk_ttyio_receive_buf2(struct tty_struct *tty,
        }

        if (!ldisc_data->buf_free)
-               /* ttyio_in will tty_schedule_flip */
+               /* ttyio_in will tty_flip_buffer_push */
                return 0;

        /* Make sure the consumer has read buf before we have seen
@@ -312,7 +312,7 @@ static unsigned char ttyio_in(struct spk_synth *in_synth, int timeout)
        mb();
        ldisc_data->buf_free = true;
        /* Let TTY push more characters */
-       tty_schedule_flip(tty->port);
+       tty_flip_buffer_push(tty->port);

        return rv;
 }
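For context on the tty_schedule_flip() to tty_flip_buffer_push() switch above: the speakup ldisc parks a single byte and the reader frees the slot before asking the TTY core to deliver more data. A minimal sketch of that producer/consumer handoff, with a hypothetical single-slot structure; only tty_flip_buffer_push() and the barriers are taken from the patch, everything else is illustrative:

#include <linux/tty.h>
#include <linux/tty_flip.h>

/* Hypothetical single-slot buffer, mirroring the ldisc_data fields used above. */
struct one_slot_buf {
        char buf;
        bool buf_free;
};

/* Receive side: accept one byte only while the slot is free. */
static int one_slot_receive(struct one_slot_buf *b, unsigned char c)
{
        if (!b->buf_free)
                return 0;       /* the reader will call tty_flip_buffer_push() later */
        b->buf = c;
        mb();                   /* publish the data before clearing buf_free */
        b->buf_free = false;
        return 1;
}

/* Reader side: consume the byte, free the slot, then let the TTY push more. */
static unsigned char one_slot_read(struct one_slot_buf *b, struct tty_struct *tty)
{
        unsigned char c = b->buf;

        mb();                   /* make sure the data was read before freeing the slot */
        b->buf_free = true;
        tty_flip_buffer_push(tty->port);
        return c;
}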
@@ -11,6 +11,7 @@ menuconfig ACPI
        depends on ARCH_SUPPORTS_ACPI
        select PNP
        select NLS
+       select CRC32
        default y if X86
        help
          Advanced Configuration and Power Interface (ACPI) support for
@@ -59,6 +60,9 @@ config ACPI_SYSTEM_POWER_STATES_SUPPORT
 config ACPI_CCA_REQUIRED
        bool

+config ACPI_TABLE_LIB
+       bool
+
 config ACPI_DEBUGGER
        bool "AML debugger interface"
        select ACPI_DEBUG
@@ -71,7 +75,7 @@ config ACPI_DEBUGGER
 if ACPI_DEBUGGER

 config ACPI_DEBUGGER_USER
-       tristate "Userspace debugger accessiblity"
+       tristate "Userspace debugger accessibility"
        depends on DEBUG_FS
        help
          Export /sys/kernel/debug/acpi/acpidbg for userspace utilities
@@ -517,6 +521,28 @@ config ACPI_CONFIGFS
          userspace. The configurable ACPI groups will be visible under
          /config/acpi, assuming configfs is mounted under /config.

+config ACPI_PFRUT
+       tristate "ACPI Platform Firmware Runtime Update and Telemetry"
+       depends on 64BIT
+       help
+         This mechanism allows certain pieces of the platform firmware
+         to be updated on the fly while the system is running (runtime)
+         without the need to restart it, which is key in the cases when
+         the system needs to be available 100% of the time and it cannot
+         afford the downtime related to restarting it, or when the work
+         carried out by the system is particularly important, so it cannot
+         be interrupted, and it is not practical to wait until it is complete.
+
+         The existing firmware code can be modified (driver update) or
+         extended by adding new code to the firmware (code injection).
+
+         Besides, the telemetry driver allows user space to fetch telemetry
+         data from the firmware with the help of the Platform Firmware Runtime
+         Telemetry interface.
+
+         To compile the drivers as modules, choose M here:
+         the modules will be called pfr_update and pfr_telemetry.
+
 if ARM64
 source "drivers/acpi/arm64/Kconfig"

@@ -524,6 +550,23 @@ config ACPI_PPTT
        bool
 endif

+config ACPI_PCC
+       bool "ACPI PCC Address Space"
+       depends on PCC
+       default y
+       help
+         The PCC Address Space also referred as PCC Operation Region pertains
+         to the region of PCC subspace that succeeds the PCC signature.
+
+         The PCC Operation Region works in conjunction with the PCC Table
+         (Platform Communications Channel Table). PCC subspaces that are
+         marked for use as PCC Operation Regions must not be used as PCC
+         subspaces for the standard ACPI features such as CPPC, RASF, PDTT and
+         MPST. These standard features must always use the PCC Table instead.
+
+         Enable this feature if you want to set up and install the PCC Address
+         Space handler to handle PCC OpRegion in the firmware.
+
 source "drivers/acpi/pmic/Kconfig"

 config ACPI_VIOT
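The new ACPI_PCC option installs an address-space handler for PCC Operation Regions. A rough sketch of how such a handler is registered with ACPICA, loosely modeled on what drivers/acpi/acpi_pcc.c does; the stub handler, stub setup callback, and function names here are placeholders, not the actual driver:

#include <linux/acpi.h>

/* Stub region setup: a real driver records the PCC subspace context here. */
static acpi_status pcc_opregion_setup(acpi_handle region, u32 function,
                                      void *handler_context, void **region_context)
{
        return AE_OK;
}

/* Stub OpRegion handler: called for every AML read/write to a PCC OpRegion. */
static acpi_status pcc_opregion_handler(u32 function, acpi_physical_address addr,
                                        u32 bits, u64 *value,
                                        void *handler_context, void *region_context)
{
        /*
         * A real handler rings the PCC doorbell and copies data to/from the
         * shared-memory subspace identified by the region.
         */
        return AE_OK;
}

static void register_pcc_opregion_handler(void)
{
        acpi_status status;

        status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
                                                    ACPI_ADR_SPACE_PLATFORM_COMM,
                                                    &pcc_opregion_handler,
                                                    &pcc_opregion_setup, NULL);
        if (ACPI_FAILURE(status))
                pr_warn("PCC OpRegion handler registration failed\n");
}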
@ -9,7 +9,7 @@ ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
|
||||
# ACPI Boot-Time Table Parsing
|
||||
#
|
||||
ifeq ($(CONFIG_ACPI_CUSTOM_DSDT),y)
|
||||
tables.o: $(src)/../../include/$(subst $\",,$(CONFIG_ACPI_CUSTOM_DSDT_FILE)) ;
|
||||
tables.o: $(src)/../../include/$(CONFIG_ACPI_CUSTOM_DSDT_FILE) ;
|
||||
|
||||
endif
|
||||
|
||||
@ -67,6 +67,7 @@ acpi-$(CONFIG_ACPI_LPIT) += acpi_lpit.o
|
||||
acpi-$(CONFIG_ACPI_GENERIC_GSI) += irq.o
|
||||
acpi-$(CONFIG_ACPI_WATCHDOG) += acpi_watchdog.o
|
||||
acpi-$(CONFIG_ACPI_PRMT) += prmt.o
|
||||
acpi-$(CONFIG_ACPI_PCC) += acpi_pcc.o
|
||||
|
||||
# Address translation
|
||||
acpi-$(CONFIG_ACPI_ADXL) += acpi_adxl.o
|
||||
@ -102,6 +103,7 @@ obj-$(CONFIG_ACPI_CPPC_LIB) += cppc_acpi.o
|
||||
obj-$(CONFIG_ACPI_SPCR_TABLE) += spcr.o
|
||||
obj-$(CONFIG_ACPI_DEBUGGER_USER) += acpi_dbg.o
|
||||
obj-$(CONFIG_ACPI_PPTT) += pptt.o
|
||||
obj-$(CONFIG_ACPI_PFRUT) += pfr_update.o pfr_telemetry.o
|
||||
|
||||
# processor has its own "processor." module_param namespace
|
||||
processor-y := processor_driver.o
|
||||
|
@ -48,19 +48,12 @@ static const struct acpi_device_id ac_device_ids[] = {
|
||||
};
|
||||
MODULE_DEVICE_TABLE(acpi, ac_device_ids);
|
||||
|
||||
/* Lists of PMIC ACPI HIDs with an (often better) native charger driver */
|
||||
static const struct acpi_ac_bl acpi_ac_blacklist[] = {
|
||||
{ "INT33F4", -1 }, /* X-Powers AXP288 PMIC */
|
||||
{ "INT34D3", 3 }, /* Intel Cherrytrail Whiskey Cove PMIC */
|
||||
};
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
static int acpi_ac_resume(struct device *dev);
|
||||
#endif
|
||||
static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
|
||||
|
||||
static int ac_sleep_before_get_state_ms;
|
||||
static int ac_check_pmic = 1;
|
||||
static int ac_only;
|
||||
|
||||
static struct acpi_driver acpi_ac_driver = {
|
||||
@ -200,12 +193,6 @@ static int __init thinkpad_e530_quirk(const struct dmi_system_id *d)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init ac_do_not_check_pmic_quirk(const struct dmi_system_id *d)
|
||||
{
|
||||
ac_check_pmic = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init ac_only_quirk(const struct dmi_system_id *d)
|
||||
{
|
||||
ac_only = 1;
|
||||
@ -214,13 +201,6 @@ static int __init ac_only_quirk(const struct dmi_system_id *d)
|
||||
|
||||
/* Please keep this list alphabetically sorted */
|
||||
static const struct dmi_system_id ac_dmi_table[] __initconst = {
|
||||
{
|
||||
/* ECS EF20EA, AXP288 PMIC but uses separate fuel-gauge */
|
||||
.callback = ac_do_not_check_pmic_quirk,
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* Kodlix GK45 returning incorrect state */
|
||||
.callback = ac_only_quirk,
|
||||
@ -228,15 +208,6 @@ static const struct dmi_system_id ac_dmi_table[] __initconst = {
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "GK45"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* Lenovo Ideapad Miix 320, AXP288 PMIC, separate fuel-gauge */
|
||||
.callback = ac_do_not_check_pmic_quirk,
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "80XF"),
|
||||
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* Lenovo Thinkpad e530, see comment in acpi_ac_notify() */
|
||||
.callback = thinkpad_e530_quirk,
|
||||
@ -341,23 +312,15 @@ static int acpi_ac_remove(struct acpi_device *device)
|
||||
|
||||
static int __init acpi_ac_init(void)
|
||||
{
|
||||
unsigned int i;
|
||||
int result;
|
||||
|
||||
if (acpi_disabled)
|
||||
return -ENODEV;
|
||||
|
||||
dmi_check_system(ac_dmi_table);
|
||||
if (acpi_quirk_skip_acpi_ac_and_battery())
|
||||
return -ENODEV;
|
||||
|
||||
if (ac_check_pmic) {
|
||||
for (i = 0; i < ARRAY_SIZE(acpi_ac_blacklist); i++)
|
||||
if (acpi_dev_present(acpi_ac_blacklist[i].hid, "1",
|
||||
acpi_ac_blacklist[i].hrv)) {
|
||||
pr_info("found native %s PMIC, not loading\n",
|
||||
acpi_ac_blacklist[i].hid);
|
||||
return -ENODEV;
|
||||
}
|
||||
}
|
||||
dmi_check_system(ac_dmi_table);
|
||||
|
||||
result = acpi_bus_register_driver(&acpi_ac_driver);
|
||||
if (result < 0)
|
||||
|
@ -87,14 +87,23 @@ static int fch_misc_setup(struct apd_private_data *pdata)
|
||||
if (ret < 0)
|
||||
return -ENOENT;
|
||||
|
||||
if (!acpi_dev_get_property(adev, "is-rv", ACPI_TYPE_INTEGER, &obj))
|
||||
clk_data->is_rv = obj->integer.value;
|
||||
if (!acpi_dev_get_property(adev, "clk-name", ACPI_TYPE_STRING, &obj)) {
|
||||
clk_data->name = devm_kzalloc(&adev->dev, obj->string.length,
|
||||
GFP_KERNEL);
|
||||
|
||||
strcpy(clk_data->name, obj->string.pointer);
|
||||
} else {
|
||||
/* Set default name to mclk if entry missing in firmware */
|
||||
clk_data->name = "mclk";
|
||||
}
|
||||
|
||||
list_for_each_entry(rentry, &resource_list, node) {
|
||||
clk_data->base = devm_ioremap(&adev->dev, rentry->res->start,
|
||||
resource_size(rentry->res));
|
||||
break;
|
||||
}
|
||||
if (!clk_data->base)
|
||||
return -ENOMEM;
|
||||
|
||||
acpi_dev_free_resource_list(&resource_list);
|
||||
|
||||
|
@ -712,14 +712,13 @@ static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata,
|
||||
|
||||
static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
|
||||
{
|
||||
struct acpi_device *adev;
|
||||
struct acpi_device *adev = ACPI_COMPANION(dev);
|
||||
struct lpss_private_data *pdata;
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
ret = acpi_bus_get_device(ACPI_HANDLE(dev), &adev);
|
||||
if (WARN_ON(ret))
|
||||
return ret;
|
||||
if (WARN_ON(!adev))
|
||||
return -ENODEV;
|
||||
|
||||
spin_lock_irqsave(&dev->power.lock, flags);
|
||||
if (pm_runtime_suspended(dev)) {
|
||||
@ -732,6 +731,7 @@ static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
|
||||
goto out;
|
||||
}
|
||||
*val = __lpss_reg_read(pdata, reg);
|
||||
ret = 0;
|
||||
|
||||
out:
|
||||
spin_unlock_irqrestore(&dev->power.lock, flags);
|
||||
@@ -750,7 +750,7 @@ static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr,
        if (ret)
                return ret;

-       return snprintf(buf, PAGE_SIZE, "%08x\n", ltr_value);
+       return sysfs_emit(buf, "%08x\n", ltr_value);
 }

 static ssize_t lpss_ltr_mode_show(struct device *dev,
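Several hunks in this commit (here and in dock.c further down) convert sysfs show() callbacks from snprintf(buf, PAGE_SIZE, ...) to sysfs_emit(), which enforces the PAGE_SIZE bound itself. A minimal before/after sketch of the idiom with a hypothetical attribute, not taken from the patch:

#include <linux/device.h>
#include <linux/sysfs.h>

/* Old style: the caller must remember the PAGE_SIZE limit. */
static ssize_t foo_show_old(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%d\n", 42);
}

/* New style: sysfs_emit() checks the buffer and bounds for us. */
static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        return sysfs_emit(buf, "%d\n", 42);
}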
@ -1266,7 +1266,8 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb,
|
||||
if (!id || !id->driver_data)
|
||||
return 0;
|
||||
|
||||
if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
|
||||
adev = ACPI_COMPANION(&pdev->dev);
|
||||
if (!adev)
|
||||
return 0;
|
||||
|
||||
pdata = acpi_driver_data(adev);
|
||||
|
@ -156,8 +156,6 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
|
||||
{"BRI0A49"}, /* Boca Complete Ofc Communicator 14.4 Data-FAX */
|
||||
{"BRI1400"}, /* Boca Research 33,600 ACF Modem */
|
||||
{"BRI3400"}, /* Boca 33.6 Kbps Internal FD34FSVD */
|
||||
{"BRI0A49"}, /* Boca 33.6 Kbps Internal FD34FSVD */
|
||||
{"BDP3336"}, /* Best Data Products Inc. Smart One 336F PnP Modem */
|
||||
{"CPI4050"}, /* Computer Peripherals Inc. EuroViVa CommCenter-33.6 SP PnP */
|
||||
{"CTL3001"}, /* Creative Labs Phone Blaster 28.8 DSVD PnP Voice */
|
||||
{"CTL3011"}, /* Creative Labs Modem Blaster 28.8 DSVD PnP Voice */
|
||||
|
@ -1733,13 +1733,12 @@ acpi_video_bus_match(acpi_handle handle, u32 level, void *context,
|
||||
{
|
||||
struct acpi_device *device = context;
|
||||
struct acpi_device *sibling;
|
||||
int result;
|
||||
|
||||
if (handle == device->handle)
|
||||
return AE_CTRL_TERMINATE;
|
||||
|
||||
result = acpi_bus_get_device(handle, &sibling);
|
||||
if (result)
|
||||
sibling = acpi_fetch_acpi_dev(handle);
|
||||
if (!sibling)
|
||||
return AE_OK;
|
||||
|
||||
if (!strcmp(acpi_device_name(sibling), ACPI_VIDEO_BUS_NAME))
|
||||
|
@ -223,6 +223,11 @@ acpi_ev_pci_bar_region_setup(acpi_handle handle,
|
||||
u32 function,
|
||||
void *handler_context, void **region_context);
|
||||
|
||||
acpi_status
|
||||
acpi_ev_data_table_region_setup(acpi_handle handle,
|
||||
u32 function,
|
||||
void *handler_context, void **region_context);
|
||||
|
||||
acpi_status
|
||||
acpi_ev_default_region_setup(acpi_handle handle,
|
||||
u32 function,
|
||||
|
@ -138,6 +138,7 @@ struct acpi_object_region {
|
||||
union acpi_operand_object *next;
|
||||
acpi_physical_address address;
|
||||
u32 length;
|
||||
void *pointer; /* Only for data table regions */
|
||||
};
|
||||
|
||||
struct acpi_object_method {
|
||||
|
@ -35,7 +35,8 @@ acpi_tb_init_table_descriptor(struct acpi_table_desc *table_desc,
|
||||
|
||||
acpi_status
|
||||
acpi_tb_acquire_temp_table(struct acpi_table_desc *table_desc,
|
||||
acpi_physical_address address, u8 flags);
|
||||
acpi_physical_address address,
|
||||
u8 flags, struct acpi_table_header *table);
|
||||
|
||||
void acpi_tb_release_temp_table(struct acpi_table_desc *table_desc);
|
||||
|
||||
@ -86,6 +87,7 @@ acpi_tb_release_table(struct acpi_table_header *table,
|
||||
acpi_status
|
||||
acpi_tb_install_standard_table(acpi_physical_address address,
|
||||
u8 flags,
|
||||
struct acpi_table_header *table,
|
||||
u8 reload, u8 override, u32 *table_index);
|
||||
|
||||
void acpi_tb_uninstall_table(struct acpi_table_desc *table_desc);
|
||||
@ -95,7 +97,9 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node);
|
||||
|
||||
acpi_status
|
||||
acpi_tb_install_and_load_table(acpi_physical_address address,
|
||||
u8 flags, u8 override, u32 *table_index);
|
||||
u8 flags,
|
||||
struct acpi_table_header *table,
|
||||
u8 override, u32 *table_index);
|
||||
|
||||
acpi_status acpi_tb_unload_table(u32 table_index);
|
||||
|
||||
|
@ -531,6 +531,7 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
|
||||
|
||||
obj_desc->region.address = ACPI_PTR_TO_PHYSADDR(table);
|
||||
obj_desc->region.length = table->length;
|
||||
obj_desc->region.pointer = table;
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
|
||||
obj_desc,
|
||||
|
@ -386,7 +386,7 @@ acpi_ev_install_space_handler(struct acpi_namespace_node *node,
|
||||
case ACPI_ADR_SPACE_DATA_TABLE:
|
||||
|
||||
handler = acpi_ex_data_table_space_handler;
|
||||
setup = NULL;
|
||||
setup = acpi_ev_data_table_region_setup;
|
||||
break;
|
||||
|
||||
default:
|
||||
|
@ -162,6 +162,16 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
|
||||
return_ACPI_STATUS(AE_NOT_EXIST);
|
||||
}
|
||||
|
||||
if (region_obj->region.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
|
||||
struct acpi_pcc_info *ctx =
|
||||
handler_desc->address_space.context;
|
||||
|
||||
ctx->internal_buffer =
|
||||
field_obj->field.internal_pcc_buffer;
|
||||
ctx->length = (u16)region_obj->region.length;
|
||||
ctx->subspace_id = (u8)region_obj->region.address;
|
||||
}
|
||||
|
||||
/*
|
||||
* We must exit the interpreter because the region setup will
|
||||
* potentially execute control methods (for example, the _REG method
|
||||
|
@ -406,6 +406,58 @@ acpi_ev_cmos_region_setup(acpi_handle handle,
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_data_table_region_setup
|
||||
*
|
||||
* PARAMETERS: handle - Region we are interested in
|
||||
* function - Start or stop
|
||||
* handler_context - Address space handler context
|
||||
* region_context - Region specific context
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Setup a data_table_region
|
||||
*
|
||||
* MUTEX: Assumes namespace is not locked
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
acpi_status
|
||||
acpi_ev_data_table_region_setup(acpi_handle handle,
|
||||
u32 function,
|
||||
void *handler_context, void **region_context)
|
||||
{
|
||||
union acpi_operand_object *region_desc =
|
||||
(union acpi_operand_object *)handle;
|
||||
struct acpi_data_table_space_context *local_region_context;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ev_data_table_region_setup);
|
||||
|
||||
if (function == ACPI_REGION_DEACTIVATE) {
|
||||
if (*region_context) {
|
||||
ACPI_FREE(*region_context);
|
||||
*region_context = NULL;
|
||||
}
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/* Create a new context */
|
||||
|
||||
local_region_context =
|
||||
ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_data_table_space_context));
|
||||
if (!(local_region_context)) {
|
||||
return_ACPI_STATUS(AE_NO_MEMORY);
|
||||
}
|
||||
|
||||
/* Save the data table pointer for use in the handler */
|
||||
|
||||
local_region_context->pointer = region_desc->region.pointer;
|
||||
|
||||
*region_context = local_region_context;
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_default_region_setup
|
||||
|
@ -411,7 +411,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
|
||||
acpi_ex_exit_interpreter();
|
||||
status = acpi_tb_install_and_load_table(ACPI_PTR_TO_PHYSADDR(table),
|
||||
ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL,
|
||||
TRUE, &table_index);
|
||||
table, TRUE, &table_index);
|
||||
acpi_ex_enter_interpreter();
|
||||
if (ACPI_FAILURE(status)) {
|
||||
|
||||
|
@ -279,6 +279,7 @@ acpi_ex_create_region(u8 * aml_start,
|
||||
obj_desc->region.space_id = space_id;
|
||||
obj_desc->region.address = 0;
|
||||
obj_desc->region.length = 0;
|
||||
obj_desc->region.pointer = NULL;
|
||||
obj_desc->region.node = node;
|
||||
obj_desc->region.handler = NULL;
|
||||
obj_desc->common.flags &=
|
||||
|
@ -509,8 +509,15 @@ acpi_ex_data_table_space_handler(u32 function,
|
||||
u64 *value,
|
||||
void *handler_context, void *region_context)
|
||||
{
|
||||
struct acpi_data_table_space_context *mapping;
|
||||
char *pointer;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ex_data_table_space_handler);
|
||||
|
||||
mapping = (struct acpi_data_table_space_context *) region_context;
|
||||
pointer = ACPI_CAST_PTR(char, mapping->pointer) +
|
||||
(address - ACPI_PTR_TO_PHYSADDR(mapping->pointer));
|
||||
|
||||
/*
|
||||
* Perform the memory read or write. The bit_width was already
|
||||
* validated.
|
||||
@ -518,14 +525,14 @@ acpi_ex_data_table_space_handler(u32 function,
|
||||
switch (function) {
|
||||
case ACPI_READ:
|
||||
|
||||
memcpy(ACPI_CAST_PTR(char, value),
|
||||
ACPI_PHYSADDR_TO_PTR(address), ACPI_DIV_8(bit_width));
|
||||
memcpy(ACPI_CAST_PTR(char, value), pointer,
|
||||
ACPI_DIV_8(bit_width));
|
||||
break;
|
||||
|
||||
case ACPI_WRITE:
|
||||
|
||||
memcpy(ACPI_PHYSADDR_TO_PTR(address),
|
||||
ACPI_CAST_PTR(char, value), ACPI_DIV_8(bit_width));
|
||||
memcpy(pointer, ACPI_CAST_PTR(char, value),
|
||||
ACPI_DIV_8(bit_width));
|
||||
break;
|
||||
|
||||
default:
|
||||
|
@ -89,14 +89,27 @@ acpi_tb_init_table_descriptor(struct acpi_table_desc *table_desc,
|
||||
{
|
||||
|
||||
/*
|
||||
* Initialize the table descriptor. Set the pointer to NULL, since the
|
||||
* table is not fully mapped at this time.
|
||||
* Initialize the table descriptor. Set the pointer to NULL for external
|
||||
* tables, since the table is not fully mapped at this time.
|
||||
*/
|
||||
memset(table_desc, 0, sizeof(struct acpi_table_desc));
|
||||
table_desc->address = address;
|
||||
table_desc->length = table->length;
|
||||
table_desc->flags = flags;
|
||||
ACPI_MOVE_32_TO_32(table_desc->signature.ascii, table->signature);
|
||||
|
||||
switch (table_desc->flags & ACPI_TABLE_ORIGIN_MASK) {
|
||||
case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL:
|
||||
case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL:
|
||||
|
||||
table_desc->pointer = table;
|
||||
break;
|
||||
|
||||
case ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL:
|
||||
default:
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
@ -132,9 +145,7 @@ acpi_tb_acquire_table(struct acpi_table_desc *table_desc,
|
||||
case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL:
|
||||
case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL:
|
||||
|
||||
table = ACPI_CAST_PTR(struct acpi_table_header,
|
||||
ACPI_PHYSADDR_TO_PTR(table_desc->
|
||||
address));
|
||||
table = table_desc->pointer;
|
||||
break;
|
||||
|
||||
default:
|
||||
@ -196,6 +207,8 @@ acpi_tb_release_table(struct acpi_table_header *table,
|
||||
* PARAMETERS: table_desc - Table descriptor to be acquired
|
||||
* address - Address of the table
|
||||
* flags - Allocation flags of the table
|
||||
* table - Pointer to the table (required for virtual
|
||||
* origins, optional for physical)
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
@ -208,49 +221,52 @@ acpi_tb_release_table(struct acpi_table_header *table,
|
||||
|
||||
acpi_status
|
||||
acpi_tb_acquire_temp_table(struct acpi_table_desc *table_desc,
|
||||
acpi_physical_address address, u8 flags)
|
||||
acpi_physical_address address,
|
||||
u8 flags, struct acpi_table_header *table)
|
||||
{
|
||||
struct acpi_table_header *table_header;
|
||||
u8 mapped_table = FALSE;
|
||||
|
||||
switch (flags & ACPI_TABLE_ORIGIN_MASK) {
|
||||
case ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL:
|
||||
|
||||
/* Get the length of the full table from the header */
|
||||
|
||||
table_header =
|
||||
acpi_os_map_memory(address,
|
||||
sizeof(struct acpi_table_header));
|
||||
if (!table_header) {
|
||||
return (AE_NO_MEMORY);
|
||||
if (!table) {
|
||||
table =
|
||||
acpi_os_map_memory(address,
|
||||
sizeof(struct
|
||||
acpi_table_header));
|
||||
if (!table) {
|
||||
return (AE_NO_MEMORY);
|
||||
}
|
||||
|
||||
mapped_table = TRUE;
|
||||
}
|
||||
|
||||
acpi_tb_init_table_descriptor(table_desc, address, flags,
|
||||
table_header);
|
||||
acpi_os_unmap_memory(table_header,
|
||||
sizeof(struct acpi_table_header));
|
||||
return (AE_OK);
|
||||
break;
|
||||
|
||||
case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL:
|
||||
case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL:
|
||||
|
||||
table_header = ACPI_CAST_PTR(struct acpi_table_header,
|
||||
ACPI_PHYSADDR_TO_PTR(address));
|
||||
if (!table_header) {
|
||||
return (AE_NO_MEMORY);
|
||||
if (!table) {
|
||||
return (AE_BAD_PARAMETER);
|
||||
}
|
||||
|
||||
acpi_tb_init_table_descriptor(table_desc, address, flags,
|
||||
table_header);
|
||||
return (AE_OK);
|
||||
break;
|
||||
|
||||
default:
|
||||
|
||||
break;
|
||||
/* Table is not valid yet */
|
||||
|
||||
return (AE_NO_MEMORY);
|
||||
}
|
||||
|
||||
/* Table is not valid yet */
|
||||
acpi_tb_init_table_descriptor(table_desc, address, flags, table);
|
||||
if (mapped_table) {
|
||||
acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
|
||||
}
|
||||
|
||||
return (AE_NO_MEMORY);
|
||||
return (AE_OK);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
@ -335,7 +351,19 @@ void acpi_tb_invalidate_table(struct acpi_table_desc *table_desc)
|
||||
|
||||
acpi_tb_release_table(table_desc->pointer, table_desc->length,
|
||||
table_desc->flags);
|
||||
table_desc->pointer = NULL;
|
||||
|
||||
switch (table_desc->flags & ACPI_TABLE_ORIGIN_MASK) {
|
||||
case ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL:
|
||||
|
||||
table_desc->pointer = NULL;
|
||||
break;
|
||||
|
||||
case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL:
|
||||
case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL:
|
||||
default:
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
return_VOID;
|
||||
}
|
||||
@ -959,6 +987,9 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node)
|
||||
*
|
||||
* PARAMETERS: address - Physical address of the table
|
||||
* flags - Allocation flags of the table
|
||||
* table - Pointer to the table (required for
|
||||
* virtual origins, optional for
|
||||
* physical)
|
||||
* override - Whether override should be performed
|
||||
* table_index - Where table index is returned
|
||||
*
|
||||
@ -970,7 +1001,9 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node)
|
||||
|
||||
acpi_status
|
||||
acpi_tb_install_and_load_table(acpi_physical_address address,
|
||||
u8 flags, u8 override, u32 *table_index)
|
||||
u8 flags,
|
||||
struct acpi_table_header *table,
|
||||
u8 override, u32 *table_index)
|
||||
{
|
||||
acpi_status status;
|
||||
u32 i;
|
||||
@ -979,7 +1012,7 @@ acpi_tb_install_and_load_table(acpi_physical_address address,
|
||||
|
||||
/* Install the table and load it into the namespace */
|
||||
|
||||
status = acpi_tb_install_standard_table(address, flags, TRUE,
|
||||
status = acpi_tb_install_standard_table(address, flags, table, TRUE,
|
||||
override, &i);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto exit;
|
||||
|
@ -313,7 +313,7 @@ void acpi_tb_parse_fadt(void)
|
||||
acpi_tb_install_standard_table((acpi_physical_address)acpi_gbl_FADT.
|
||||
Xdsdt,
|
||||
ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
|
||||
FALSE, TRUE, &acpi_gbl_dsdt_index);
|
||||
NULL, FALSE, TRUE, &acpi_gbl_dsdt_index);
|
||||
|
||||
/* If Hardware Reduced flag is set, there is no FACS */
|
||||
|
||||
@ -322,14 +322,14 @@ void acpi_tb_parse_fadt(void)
|
||||
acpi_tb_install_standard_table((acpi_physical_address)
|
||||
acpi_gbl_FADT.facs,
|
||||
ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
|
||||
FALSE, TRUE,
|
||||
NULL, FALSE, TRUE,
|
||||
&acpi_gbl_facs_index);
|
||||
}
|
||||
if (acpi_gbl_FADT.Xfacs) {
|
||||
acpi_tb_install_standard_table((acpi_physical_address)
|
||||
acpi_gbl_FADT.Xfacs,
|
||||
ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
|
||||
FALSE, TRUE,
|
||||
NULL, FALSE, TRUE,
|
||||
&acpi_gbl_xfacs_index);
|
||||
}
|
||||
}
|
||||
|
@ -79,6 +79,8 @@ acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc,
|
||||
* PARAMETERS: address - Address of the table (might be a virtual
|
||||
* address depending on the table_flags)
|
||||
* flags - Flags for the table
|
||||
* table - Pointer to the table (required for virtual
|
||||
* origins, optional for physical)
|
||||
* reload - Whether reload should be performed
|
||||
* override - Whether override should be performed
|
||||
* table_index - Where the table index is returned
|
||||
@ -96,6 +98,7 @@ acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc,
|
||||
acpi_status
|
||||
acpi_tb_install_standard_table(acpi_physical_address address,
|
||||
u8 flags,
|
||||
struct acpi_table_header *table,
|
||||
u8 reload, u8 override, u32 *table_index)
|
||||
{
|
||||
u32 i;
|
||||
@ -106,7 +109,8 @@ acpi_tb_install_standard_table(acpi_physical_address address,
|
||||
|
||||
/* Acquire a temporary table descriptor for validation */
|
||||
|
||||
status = acpi_tb_acquire_temp_table(&new_table_desc, address, flags);
|
||||
status =
|
||||
acpi_tb_acquire_temp_table(&new_table_desc, address, flags, table);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Could not acquire table length at %8.8X%8.8X",
|
||||
@ -209,7 +213,8 @@ void acpi_tb_override_table(struct acpi_table_desc *old_table_desc)
|
||||
if (ACPI_SUCCESS(status) && table) {
|
||||
acpi_tb_acquire_temp_table(&new_table_desc,
|
||||
ACPI_PTR_TO_PHYSADDR(table),
|
||||
ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL);
|
||||
ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL,
|
||||
table);
|
||||
ACPI_ERROR_ONLY(override_type = "Logical");
|
||||
goto finish_override;
|
||||
}
|
||||
@ -220,7 +225,8 @@ void acpi_tb_override_table(struct acpi_table_desc *old_table_desc)
|
||||
&address, &length);
|
||||
if (ACPI_SUCCESS(status) && address && length) {
|
||||
acpi_tb_acquire_temp_table(&new_table_desc, address,
|
||||
ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL);
|
||||
ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
|
||||
NULL);
|
||||
ACPI_ERROR_ONLY(override_type = "Physical");
|
||||
goto finish_override;
|
||||
}
|
||||
@ -289,7 +295,8 @@ void acpi_tb_uninstall_table(struct acpi_table_desc *table_desc)
|
||||
|
||||
if ((table_desc->flags & ACPI_TABLE_ORIGIN_MASK) ==
|
||||
ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL) {
|
||||
ACPI_FREE(ACPI_PHYSADDR_TO_PTR(table_desc->address));
|
||||
ACPI_FREE(table_desc->pointer);
|
||||
table_desc->pointer = NULL;
|
||||
}
|
||||
|
||||
table_desc->address = ACPI_PTR_TO_PHYSADDR(NULL);
|
||||
|
@ -101,7 +101,8 @@ acpi_tb_print_table_header(acpi_physical_address address,
|
||||
ACPI_INFO(("%-4.4s 0x%8.8X%8.8X %06X",
|
||||
header->signature, ACPI_FORMAT_UINT64(address),
|
||||
header->length));
|
||||
} else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) {
|
||||
} else if (ACPI_VALIDATE_RSDP_SIG(ACPI_CAST_PTR(struct acpi_table_rsdp,
|
||||
header)->signature)) {
|
||||
|
||||
/* RSDP has no common fields */
|
||||
|
||||
|
@ -328,7 +328,7 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
|
||||
|
||||
status = acpi_tb_install_standard_table(address,
|
||||
ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
|
||||
FALSE, TRUE,
|
||||
NULL, FALSE, TRUE,
|
||||
&table_index);
|
||||
|
||||
if (ACPI_SUCCESS(status) &&
|
||||
|
@ -227,9 +227,7 @@ acpi_status acpi_tb_load_namespace(void)
|
||||
*
|
||||
* FUNCTION: acpi_install_table
|
||||
*
|
||||
* PARAMETERS: address - Address of the ACPI table to be installed.
|
||||
* physical - Whether the address is a physical table
|
||||
* address or not
|
||||
* PARAMETERS: table - Pointer to the ACPI table to be installed.
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
@ -240,28 +238,54 @@ acpi_status acpi_tb_load_namespace(void)
|
||||
******************************************************************************/
|
||||
|
||||
acpi_status ACPI_INIT_FUNCTION
|
||||
acpi_install_table(acpi_physical_address address, u8 physical)
|
||||
acpi_install_table(struct acpi_table_header *table)
|
||||
{
|
||||
acpi_status status;
|
||||
u8 flags;
|
||||
u32 table_index;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_install_table);
|
||||
|
||||
if (physical) {
|
||||
flags = ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL;
|
||||
} else {
|
||||
flags = ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL;
|
||||
}
|
||||
|
||||
status = acpi_tb_install_standard_table(address, flags,
|
||||
FALSE, FALSE, &table_index);
|
||||
status = acpi_tb_install_standard_table(ACPI_PTR_TO_PHYSADDR(table),
|
||||
ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL,
|
||||
table, FALSE, FALSE,
|
||||
&table_index);
|
||||
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL_INIT(acpi_install_table)
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_install_physical_table
|
||||
*
|
||||
* PARAMETERS: address - Address of the ACPI table to be installed.
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Dynamically install an ACPI table.
|
||||
* Note: This function should only be invoked after
|
||||
* acpi_initialize_tables() and before acpi_load_tables().
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status ACPI_INIT_FUNCTION
|
||||
acpi_install_physical_table(acpi_physical_address address)
|
||||
{
|
||||
acpi_status status;
|
||||
u32 table_index;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_install_physical_table);
|
||||
|
||||
status = acpi_tb_install_standard_table(address,
|
||||
ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
|
||||
NULL, FALSE, FALSE,
|
||||
&table_index);
|
||||
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL_INIT(acpi_install_physical_table)
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_load_table
|
||||
@ -298,7 +322,7 @@ acpi_status acpi_load_table(struct acpi_table_header *table, u32 *table_idx)
|
||||
ACPI_INFO(("Host-directed Dynamic ACPI Table Load:"));
|
||||
status = acpi_tb_install_and_load_table(ACPI_PTR_TO_PHYSADDR(table),
|
||||
ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL,
|
||||
FALSE, &table_index);
|
||||
table, FALSE, &table_index);
|
||||
if (table_idx) {
|
||||
*table_idx = table_index;
|
||||
}
|
||||
|
@ -73,6 +73,7 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
|
||||
{"Windows 2018", NULL, 0, ACPI_OSI_WIN_10_RS4}, /* Windows 10 version 1803 - Added 11/2018 */
|
||||
{"Windows 2018.2", NULL, 0, ACPI_OSI_WIN_10_RS5}, /* Windows 10 version 1809 - Added 11/2018 */
|
||||
{"Windows 2019", NULL, 0, ACPI_OSI_WIN_10_19H1}, /* Windows 10 version 1903 - Added 08/2019 */
|
||||
{"Windows 2020", NULL, 0, ACPI_OSI_WIN_10_20H1}, /* Windows 10 version 2004 - Added 08/2021 */
|
||||
|
||||
/* Feature Group Strings */
|
||||
|
||||
|
@@ -28,9 +28,10 @@
 #undef pr_fmt
 #define pr_fmt(fmt) "EINJ: " fmt

-#define SPIN_UNIT              100                     /* 100ns */
-/* Firmware should respond within 1 milliseconds */
-#define FIRMWARE_TIMEOUT       (1 * NSEC_PER_MSEC)
+#define SLEEP_UNIT_MIN         1000                    /* 1ms */
+#define SLEEP_UNIT_MAX         5000                    /* 5ms */
+/* Firmware should respond within 1 seconds */
+#define FIRMWARE_TIMEOUT       (1 * USEC_PER_SEC)
 #define ACPI5_VENDOR_BIT       BIT(31)
 #define MEM_ERROR_MASK         (ACPI_EINJ_MEMORY_CORRECTABLE | \
                                ACPI_EINJ_MEMORY_UNCORRECTABLE | \
@@ -171,13 +172,13 @@ static int einj_get_available_error_type(u32 *type)

 static int einj_timedout(u64 *t)
 {
-       if ((s64)*t < SPIN_UNIT) {
+       if ((s64)*t < SLEEP_UNIT_MIN) {
                pr_warn(FW_WARN "Firmware does not respond in time\n");
                return 1;
        }
-       *t -= SPIN_UNIT;
-       ndelay(SPIN_UNIT);
-       touch_nmi_watchdog();
+       *t -= SLEEP_UNIT_MIN;
+       usleep_range(SLEEP_UNIT_MIN, SLEEP_UNIT_MAX);

        return 0;
 }
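The EINJ change above replaces a busy-wait (ndelay() in 100 ns steps) with usleep_range() in 1-5 ms steps, so the CPU can sleep while the firmware completes the injection. A small sketch of the general countdown-and-sleep pattern the new code follows; the poll_done() predicate and the helper name are hypothetical:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define SLEEP_MIN_US   1000    /* 1 ms */
#define SLEEP_MAX_US   5000    /* 5 ms */

/* Returns 0 when the condition came true, -ETIMEDOUT when the budget ran out. */
static int wait_for_firmware(bool (*poll_done)(void), s64 budget_us)
{
        while (!poll_done()) {
                if (budget_us < SLEEP_MIN_US)
                        return -ETIMEDOUT;
                budget_us -= SLEEP_MIN_US;
                /* Sleep instead of spinning; the exact wakeup time is flexible. */
                usleep_range(SLEEP_MIN_US, SLEEP_MAX_US);
        }
        return 0;
}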
@ -544,7 +545,8 @@ static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
|
||||
((region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
|
||||
!= REGION_INTERSECTS) &&
|
||||
(region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY)
|
||||
!= REGION_INTERSECTS)))
|
||||
!= REGION_INTERSECTS) &&
|
||||
!arch_is_platform_page(base_addr)))
|
||||
return -EINVAL;
|
||||
|
||||
inject:
|
||||
|
@ -449,7 +449,7 @@ static bool ghes_do_memory_failure(u64 physical_addr, int flags)
|
||||
return false;
|
||||
|
||||
pfn = PHYS_PFN(physical_addr);
|
||||
if (!pfn_valid(pfn)) {
|
||||
if (!pfn_valid(pfn) && !arch_is_platform_page(physical_addr)) {
|
||||
pr_warn_ratelimited(FW_WARN GHES_PFX
|
||||
"Invalid address in generic error data: %#llx\n",
|
||||
physical_addr);
|
||||
|
@ -86,7 +86,9 @@ static int hest_esrc_len(struct acpi_hest_header *hest_hdr)
|
||||
return len;
|
||||
};
|
||||
|
||||
int apei_hest_parse(apei_hest_func_t func, void *data)
|
||||
typedef int (*apei_hest_func_t)(struct acpi_hest_header *hest_hdr, void *data);
|
||||
|
||||
static int apei_hest_parse(apei_hest_func_t func, void *data)
|
||||
{
|
||||
struct acpi_hest_header *hest_hdr;
|
||||
int i, rc, len;
|
||||
@ -121,7 +123,6 @@ int apei_hest_parse(apei_hest_func_t func, void *data)
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(apei_hest_parse);
|
||||
|
||||
/*
|
||||
* Check if firmware advertises firmware first mode. We need FF bit to be set
|
||||
|
@ -52,7 +52,6 @@ static bool battery_driver_registered;
|
||||
static int battery_bix_broken_package;
|
||||
static int battery_notification_delay_ms;
|
||||
static int battery_ac_is_broken;
|
||||
static int battery_check_pmic = 1;
|
||||
static int battery_quirk_notcharging;
|
||||
static unsigned int cache_time = 1000;
|
||||
module_param(cache_time, uint, 0644);
|
||||
@ -60,16 +59,15 @@ MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
|
||||
|
||||
static const struct acpi_device_id battery_device_ids[] = {
|
||||
{"PNP0C0A", 0},
|
||||
|
||||
/* Microsoft Surface Go 3 */
|
||||
{"MSHW0146", 0},
|
||||
|
||||
{"", 0},
|
||||
};
|
||||
|
||||
MODULE_DEVICE_TABLE(acpi, battery_device_ids);
|
||||
|
||||
/* Lists of PMIC ACPI HIDs with an (often better) native battery driver */
|
||||
static const char * const acpi_battery_blacklist[] = {
|
||||
"INT33F4", /* X-Powers AXP288 PMIC */
|
||||
};
|
||||
|
||||
enum {
|
||||
ACPI_BATTERY_ALARM_PRESENT,
|
||||
ACPI_BATTERY_XINFO_PRESENT,
|
||||
@ -1107,13 +1105,6 @@ battery_ac_is_broken_quirk(const struct dmi_system_id *d)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init
|
||||
battery_do_not_check_pmic_quirk(const struct dmi_system_id *d)
|
||||
{
|
||||
battery_check_pmic = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init battery_quirk_not_charging(const struct dmi_system_id *d)
|
||||
{
|
||||
battery_quirk_notcharging = 1;
|
||||
@ -1148,22 +1139,6 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = {
|
||||
DMI_MATCH(DMI_BIOS_DATE, "08/22/2014"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* ECS EF20EA, AXP288 PMIC but uses separate fuel-gauge */
|
||||
.callback = battery_do_not_check_pmic_quirk,
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* Lenovo Ideapad Miix 320, AXP288 PMIC, separate fuel-gauge */
|
||||
.callback = battery_do_not_check_pmic_quirk,
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "80XF"),
|
||||
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/*
|
||||
* On Lenovo ThinkPads the BIOS specification defines
|
||||
@ -1177,6 +1152,14 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = {
|
||||
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* Microsoft Surface Go 3 */
|
||||
.callback = battery_notification_delay_quirk,
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"),
|
||||
},
|
||||
},
|
||||
{},
|
||||
};
|
||||
|
||||
@ -1301,19 +1284,12 @@ static struct acpi_driver acpi_battery_driver = {
|
||||
|
||||
static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
|
||||
{
|
||||
unsigned int i;
|
||||
int result;
|
||||
|
||||
dmi_check_system(bat_dmi_table);
|
||||
if (acpi_quirk_skip_acpi_ac_and_battery())
|
||||
return;
|
||||
|
||||
if (battery_check_pmic) {
|
||||
for (i = 0; i < ARRAY_SIZE(acpi_battery_blacklist); i++)
|
||||
if (acpi_dev_present(acpi_battery_blacklist[i], "1", -1)) {
|
||||
pr_info("found native %s PMIC, not loading\n",
|
||||
acpi_battery_blacklist[i]);
|
||||
return;
|
||||
}
|
||||
}
|
||||
dmi_check_system(bat_dmi_table);
|
||||
|
||||
result = acpi_bus_register_driver(&acpi_battery_driver);
|
||||
battery_driver_registered = (result == 0);
|
||||
|
@ -1043,6 +1043,7 @@ struct bus_type acpi_bus_type = {
|
||||
.remove = acpi_device_remove,
|
||||
.uevent = acpi_device_uevent,
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(acpi_bus_type);
|
||||
|
||||
/* --------------------------------------------------------------------------
|
||||
Initialization/Cleanup
|
||||
@ -1320,6 +1321,7 @@ static int __init acpi_init(void)
|
||||
pr_debug("%s: kset create error\n", __func__);
|
||||
|
||||
init_prmt();
|
||||
acpi_init_pcc();
|
||||
result = acpi_bus_init();
|
||||
if (result) {
|
||||
kobject_put(acpi_kobj);
|
||||
|
@@ -43,7 +43,7 @@
 #include <acpi/cppc_acpi.h>

 struct cppc_pcc_data {
-       struct mbox_chan *pcc_channel;
+       struct pcc_mbox_chan *pcc_channel;
        void __iomem *pcc_comm_addr;
        bool pcc_channel_acquired;
        unsigned int deadline_us;
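The CPPC code now stores a struct pcc_mbox_chan rather than a bare mbox_chan: the PCC driver hands back the shared-memory geometry and timing together with the channel. A condensed sketch of the new request/ioremap sequence, following the register_pcc_channel() changes later in this commit; error handling is trimmed and the helper name is made up:

#include <acpi/pcc.h>
#include <linux/acpi.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>

static void __iomem *map_pcc_subspace(struct mbox_client *cl, int subspace_id)
{
        struct pcc_mbox_chan *pcc_chan;

        pcc_chan = pcc_mbox_request_channel(cl, subspace_id);
        if (IS_ERR(pcc_chan))
                return NULL;

        /*
         * Shared-memory geometry and timing (latency, max_access_rate,
         * min_turnaround_time) now come straight from the PCC channel;
         * the raw mailbox channel stays available as pcc_chan->mchan for
         * ringing the doorbell with mbox_send_message().
         */
        return acpi_os_ioremap(pcc_chan->shmem_base_addr, pcc_chan->shmem_size);
}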
@ -118,6 +118,8 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
|
||||
*/
|
||||
#define NUM_RETRIES 500ULL
|
||||
|
||||
#define OVER_16BTS_MASK ~0xFFFFULL
|
||||
|
||||
#define define_one_cppc_ro(_name) \
|
||||
static struct kobj_attribute _name = \
|
||||
__ATTR(_name, 0444, show_##_name, NULL)
|
||||
@ -179,10 +181,11 @@ static struct attribute *cppc_attrs[] = {
|
||||
&lowest_freq.attr,
|
||||
NULL
|
||||
};
|
||||
ATTRIBUTE_GROUPS(cppc);
|
||||
|
||||
static struct kobj_type cppc_ktype = {
|
||||
.sysfs_ops = &kobj_sysfs_ops,
|
||||
.default_attrs = cppc_attrs,
|
||||
.default_groups = cppc_groups,
|
||||
};
|
||||
|
||||
static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
|
||||
@ -295,7 +298,7 @@ static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
|
||||
pcc_ss_data->platform_owns_pcc = true;
|
||||
|
||||
/* Ring doorbell */
|
||||
ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
|
||||
ret = mbox_send_message(pcc_ss_data->pcc_channel->mchan, &cmd);
|
||||
if (ret < 0) {
|
||||
pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
|
||||
pcc_ss_id, cmd, ret);
|
||||
@ -308,10 +311,10 @@ static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
|
||||
if (pcc_ss_data->pcc_mrtt)
|
||||
pcc_ss_data->last_cmd_cmpl_time = ktime_get();
|
||||
|
||||
if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
|
||||
mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
|
||||
if (pcc_ss_data->pcc_channel->mchan->mbox->txdone_irq)
|
||||
mbox_chan_txdone(pcc_ss_data->pcc_channel->mchan, ret);
|
||||
else
|
||||
mbox_client_txdone(pcc_ss_data->pcc_channel, ret);
|
||||
mbox_client_txdone(pcc_ss_data->pcc_channel->mchan, ret);
|
||||
|
||||
end:
|
||||
if (cmd == CMD_WRITE) {
|
||||
@ -493,46 +496,33 @@ EXPORT_SYMBOL_GPL(acpi_get_psd_map);
|
||||
|
||||
static int register_pcc_channel(int pcc_ss_idx)
|
||||
{
|
||||
struct acpi_pcct_hw_reduced *cppc_ss;
|
||||
struct pcc_mbox_chan *pcc_chan;
|
||||
u64 usecs_lat;
|
||||
|
||||
if (pcc_ss_idx >= 0) {
|
||||
pcc_data[pcc_ss_idx]->pcc_channel =
|
||||
pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);
|
||||
pcc_chan = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);
|
||||
|
||||
if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
|
||||
if (IS_ERR(pcc_chan)) {
|
||||
pr_err("Failed to find PCC channel for subspace %d\n",
|
||||
pcc_ss_idx);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/*
|
||||
* The PCC mailbox controller driver should
|
||||
* have parsed the PCCT (global table of all
|
||||
* PCC channels) and stored pointers to the
|
||||
* subspace communication region in con_priv.
|
||||
*/
|
||||
cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;
|
||||
|
||||
if (!cppc_ss) {
|
||||
pr_err("No PCC subspace found for %d CPPC\n",
|
||||
pcc_ss_idx);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
pcc_data[pcc_ss_idx]->pcc_channel = pcc_chan;
|
||||
/*
|
||||
* cppc_ss->latency is just a Nominal value. In reality
|
||||
* the remote processor could be much slower to reply.
|
||||
* So add an arbitrary amount of wait on top of Nominal.
|
||||
*/
|
||||
usecs_lat = NUM_RETRIES * cppc_ss->latency;
|
||||
usecs_lat = NUM_RETRIES * pcc_chan->latency;
|
||||
pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
|
||||
pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
|
||||
pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
|
||||
pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;
|
||||
pcc_data[pcc_ss_idx]->pcc_mrtt = pcc_chan->min_turnaround_time;
|
||||
pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
|
||||
pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;
|
||||
|
||||
pcc_data[pcc_ss_idx]->pcc_comm_addr =
|
||||
acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
|
||||
acpi_os_ioremap(pcc_chan->shmem_base_addr,
|
||||
pcc_chan->shmem_size);
|
||||
if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
|
||||
pr_err("Failed to ioremap PCC comm region mem for %d\n",
|
||||
pcc_ss_idx);
|
||||
@ -617,47 +607,30 @@ static bool is_cppc_supported(int revision, int num_ent)
|
||||
/*
|
||||
* An example CPC table looks like the following.
|
||||
*
|
||||
* Name(_CPC, Package()
|
||||
* {
|
||||
* 17,
|
||||
* NumEntries
|
||||
* 1,
|
||||
* // Revision
|
||||
* ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},
|
||||
* // Highest Performance
|
||||
* ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},
|
||||
* // Nominal Performance
|
||||
* ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},
|
||||
* // Lowest Nonlinear Performance
|
||||
* ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},
|
||||
* // Lowest Performance
|
||||
* ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},
|
||||
* // Guaranteed Performance Register
|
||||
* ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},
|
||||
* // Desired Performance Register
|
||||
* ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
|
||||
* ..
|
||||
* ..
|
||||
* ..
|
||||
*
|
||||
* }
|
||||
* Name (_CPC, Package() {
|
||||
* 17, // NumEntries
|
||||
* 1, // Revision
|
||||
* ResourceTemplate() {Register(PCC, 32, 0, 0x120, 2)}, // Highest Performance
|
||||
* ResourceTemplate() {Register(PCC, 32, 0, 0x124, 2)}, // Nominal Performance
|
||||
* ResourceTemplate() {Register(PCC, 32, 0, 0x128, 2)}, // Lowest Nonlinear Performance
|
||||
* ResourceTemplate() {Register(PCC, 32, 0, 0x12C, 2)}, // Lowest Performance
|
||||
* ResourceTemplate() {Register(PCC, 32, 0, 0x130, 2)}, // Guaranteed Performance Register
|
||||
* ResourceTemplate() {Register(PCC, 32, 0, 0x110, 2)}, // Desired Performance Register
|
||||
* ResourceTemplate() {Register(SystemMemory, 0, 0, 0, 0)},
|
||||
* ...
|
||||
* ...
|
||||
* ...
|
||||
* }
|
||||
* Each Register() encodes how to access that specific register.
|
||||
* e.g. a sample PCC entry has the following encoding:
|
||||
*
|
||||
* Register (
|
||||
* PCC,
|
||||
* AddressSpaceKeyword
|
||||
* 8,
|
||||
* //RegisterBitWidth
|
||||
* 8,
|
||||
* //RegisterBitOffset
|
||||
* 0x30,
|
||||
* //RegisterAddress
|
||||
* 9
|
||||
* //AccessSize (subspace ID)
|
||||
* 0
|
||||
* )
|
||||
* }
|
||||
* Register (
|
||||
* PCC, // AddressSpaceKeyword
|
||||
* 8, // RegisterBitWidth
|
||||
* 8, // RegisterBitOffset
|
||||
* 0x30, // RegisterAddress
|
||||
* 9, // AccessSize (subspace ID)
|
||||
* )
|
||||
*/
|
||||
|
||||
#ifndef init_freq_invariance_cppc
|
||||
@ -759,9 +732,26 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
|
||||
goto out_free;
|
||||
cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
|
||||
}
|
||||
} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
|
||||
if (gas_t->access_width < 1 || gas_t->access_width > 3) {
|
||||
/*
|
||||
* 1 = 8-bit, 2 = 16-bit, and 3 = 32-bit.
|
||||
* SystemIO doesn't implement 64-bit
|
||||
* registers.
|
||||
*/
|
||||
pr_debug("Invalid access width %d for SystemIO register\n",
|
||||
gas_t->access_width);
|
||||
goto out_free;
|
||||
}
|
||||
if (gas_t->address & OVER_16BTS_MASK) {
|
||||
/* SystemIO registers use 16-bit integer addresses */
|
||||
pr_debug("Invalid IO port %llu for SystemIO register\n",
|
||||
gas_t->address);
|
||||
goto out_free;
|
||||
}
|
||||
} else {
|
||||
if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
|
||||
/* Support only PCC ,SYS MEM and FFH type regs */
|
||||
/* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
|
||||
pr_debug("Unsupported register type: %d\n", gas_t->space_id);
|
||||
goto out_free;
|
||||
}
|
||||
@ -925,18 +915,33 @@ int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
|
||||
|
||||
static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
|
||||
{
|
||||
int ret_val = 0;
|
||||
void __iomem *vaddr = NULL;
|
||||
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
|
||||
struct cpc_reg *reg = ®_res->cpc_entry.reg;
|
||||
|
||||
if (reg_res->type == ACPI_TYPE_INTEGER) {
|
||||
*val = reg_res->cpc_entry.int_value;
|
||||
return ret_val;
|
||||
return 0;
|
||||
}
|
||||
|
||||
*val = 0;
|
||||
if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
|
||||
|
||||
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
|
||||
u32 width = 8 << (reg->access_width - 1);
|
||||
u32 val_u32;
|
||||
acpi_status status;
|
||||
|
||||
status = acpi_os_read_port((acpi_io_address)reg->address,
|
||||
&val_u32, width);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
pr_debug("Error: Failed to read SystemIO port %llx\n",
|
||||
reg->address);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
*val = val_u32;
|
||||
return 0;
|
||||
} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
|
||||
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
|
||||
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
|
||||
vaddr = reg_res->sys_mem_vaddr;
|
||||
@ -962,10 +967,10 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
|
||||
default:
|
||||
pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
|
||||
reg->bit_width, pcc_ss_id);
|
||||
ret_val = -EFAULT;
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return ret_val;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
|
||||
@ -975,7 +980,20 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
|
||||
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
|
||||
struct cpc_reg *reg = ®_res->cpc_entry.reg;
|
||||
|
||||
if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
|
||||
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
|
||||
u32 width = 8 << (reg->access_width - 1);
|
||||
acpi_status status;
|
||||
|
||||
status = acpi_os_write_port((acpi_io_address)reg->address,
|
||||
(u32)val, width);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
pr_debug("Error: Failed to write SystemIO port %llx\n",
|
||||
reg->address);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
|
||||
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
|
||||
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
|
||||
vaddr = reg_res->sys_mem_vaddr;
|
||||
@ -1242,6 +1260,51 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
|
||||
|
||||
/**
|
||||
* cppc_set_enable - Set to enable CPPC on the processor by writing the
|
||||
* Continuous Performance Control package EnableRegister field.
|
||||
* @cpu: CPU for which to enable CPPC register.
|
||||
* @enable: 0 - disable, 1 - enable CPPC feature on the processor.
|
||||
*
|
||||
* Return: 0 for success, -ERRNO or -EIO otherwise.
|
||||
*/
|
||||
int cppc_set_enable(int cpu, bool enable)
|
||||
{
|
||||
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
|
||||
struct cpc_register_resource *enable_reg;
|
||||
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
|
||||
struct cppc_pcc_data *pcc_ss_data = NULL;
|
||||
int ret = -EINVAL;
|
||||
|
||||
if (!cpc_desc) {
|
||||
pr_debug("No CPC descriptor for CPU:%d\n", cpu);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
enable_reg = &cpc_desc->cpc_regs[ENABLE];
|
||||
|
||||
if (CPC_IN_PCC(enable_reg)) {
|
||||
|
||||
if (pcc_ss_id < 0)
|
||||
return -EIO;
|
||||
|
||||
ret = cpc_write(cpu, enable_reg, enable);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
pcc_ss_data = pcc_data[pcc_ss_id];
|
||||
|
||||
down_write(&pcc_ss_data->pcc_lock);
|
||||
/* after writing CPC, transfer the ownership of PCC to platfrom */
|
||||
ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
|
||||
up_write(&pcc_ss_data->pcc_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return cpc_write(cpu, enable_reg, enable);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cppc_set_enable);
|
||||
|
||||
/**
|
||||
* cppc_set_perf - Set a CPU's performance controls.
|
||||
* @cpu: CPU for which to set performance controls.
|
||||
|
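cppc_set_enable() is a newly exported helper; a CPPC-based cpufreq driver would typically call it once per CPU when it starts managing that CPU. A minimal, hypothetical usage sketch (the wrapper name is not from the patch):

#include <acpi/cppc_acpi.h>

/* Hypothetical init hook of a CPPC-based cpufreq driver. */
static int enable_cppc_on_cpu(unsigned int cpu)
{
        int ret;

        ret = cppc_set_enable(cpu, true);
        if (ret)
                pr_warn("Failed to enable CPPC on CPU%u (%d)\n", cpu, ret);

        return ret;
}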
@@ -285,14 +285,12 @@ EXPORT_SYMBOL(acpi_device_set_power);

 int acpi_bus_set_power(acpi_handle handle, int state)
 {
-       struct acpi_device *device;
-       int result;
+       struct acpi_device *device = acpi_fetch_acpi_dev(handle);

-       result = acpi_bus_get_device(handle, &device);
-       if (result)
-               return result;
+       if (device)
+               return acpi_device_set_power(device, state);

-       return acpi_device_set_power(device, state);
+       return -ENODEV;
 }
 EXPORT_SYMBOL(acpi_bus_set_power);

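The device_pm.c hunks above and below all follow the same conversion: acpi_bus_get_device(), which returned an int error code through an output parameter, becomes acpi_fetch_acpi_dev(), which simply returns the struct acpi_device pointer or NULL. The idiom change in isolation, with a hypothetical caller:

#include <linux/acpi.h>

static int do_something_old(acpi_handle handle)
{
        struct acpi_device *adev;

        if (acpi_bus_get_device(handle, &adev))  /* old: int return + out param */
                return -ENODEV;
        return 0;
}

static int do_something_new(acpi_handle handle)
{
        struct acpi_device *adev = acpi_fetch_acpi_dev(handle);  /* new: NULL on failure */

        if (!adev)
                return -ENODEV;
        return 0;
}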
@ -410,21 +408,20 @@ EXPORT_SYMBOL_GPL(acpi_device_update_power);
|
||||
|
||||
int acpi_bus_update_power(acpi_handle handle, int *state_p)
|
||||
{
|
||||
struct acpi_device *device;
|
||||
int result;
|
||||
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
|
||||
|
||||
result = acpi_bus_get_device(handle, &device);
|
||||
return result ? result : acpi_device_update_power(device, state_p);
|
||||
if (device)
|
||||
return acpi_device_update_power(device, state_p);
|
||||
|
||||
return -ENODEV;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_bus_update_power);
|
||||
|
||||
bool acpi_bus_power_manageable(acpi_handle handle)
|
||||
{
|
||||
struct acpi_device *device;
|
||||
int result;
|
||||
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
|
||||
|
||||
result = acpi_bus_get_device(handle, &device);
|
||||
return result ? false : device->flags.power_manageable;
|
||||
return device && device->flags.power_manageable;
|
||||
}
|
||||
EXPORT_SYMBOL(acpi_bus_power_manageable);
|
||||
|
||||
@ -543,11 +540,9 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
|
||||
|
||||
bool acpi_bus_can_wakeup(acpi_handle handle)
|
||||
{
|
||||
struct acpi_device *device;
|
||||
int result;
|
||||
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
|
||||
|
||||
result = acpi_bus_get_device(handle, &device);
|
||||
return result ? false : device->wakeup.flags.valid;
|
||||
return device && device->wakeup.flags.valid;
|
||||
}
|
||||
EXPORT_SYMBOL(acpi_bus_can_wakeup);
|
||||
|
||||
@@ -1400,4 +1395,30 @@ bool acpi_storage_d3(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(acpi_storage_d3);

+/**
+ * acpi_dev_state_d0 - Tell if the device is in D0 power state
+ * @dev: Physical device the ACPI power state of which to check
+ *
+ * On a system without ACPI, return true. On a system with ACPI, return true if
+ * the current ACPI power state of the device is D0, or false otherwise.
+ *
+ * Note that the power state of a device is not well-defined after it has been
+ * passed to acpi_device_set_power() and before that function returns, so it is
+ * not valid to ask for the ACPI power state of the device in that time frame.
+ *
+ * This function is intended to be used in a driver's probe or remove
+ * function. See Documentation/firmware-guide/acpi/low-power-probe.rst for
+ * more information.
+ */
+bool acpi_dev_state_d0(struct device *dev)
+{
+       struct acpi_device *adev = ACPI_COMPANION(dev);
+
+       if (!adev)
+               return true;
+
+       return adev->power.state == ACPI_STATE_D0;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_state_d0);
+
 #endif /* CONFIG_PM */
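acpi_dev_state_d0() is meant for the probe()/remove() paths of drivers whose device may have been left in a low-power state (see the low-power-probe.rst document referenced in the kernel-doc above). A hypothetical I2C driver probe using it; the driver and its behavior are made up for illustration:

#include <linux/acpi.h>
#include <linux/i2c.h>

static int foo_probe(struct i2c_client *client)
{
        bool full_power = acpi_dev_state_d0(&client->dev);

        if (!full_power) {
                /*
                 * The device was intentionally left in a low-power state;
                 * skip register access that requires D0 and finish the
                 * power-up later (for example in runtime resume).
                 */
        }

        return 0;
}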
@ -53,6 +53,7 @@ static struct attribute *acpi_data_node_default_attrs[] = {
|
||||
&data_node_path.attr,
|
||||
NULL
|
||||
};
|
||||
ATTRIBUTE_GROUPS(acpi_data_node_default);
|
||||
|
||||
#define to_data_node(k) container_of(k, struct acpi_data_node, kobj)
|
||||
#define to_attr(a) container_of(a, struct acpi_data_node_attr, attr)
|
||||
@ -79,7 +80,7 @@ static void acpi_data_node_release(struct kobject *kobj)
|
||||
|
||||
static struct kobj_type acpi_data_node_ktype = {
|
||||
.sysfs_ops = &acpi_data_node_sysfs_ops,
|
||||
.default_attrs = acpi_data_node_default_attrs,
|
||||
.default_groups = acpi_data_node_default_groups,
|
||||
.release = acpi_data_node_release,
|
||||
};
|
||||
|
||||
|
@ -489,10 +489,9 @@ static ssize_t docked_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct dock_station *dock_station = dev->platform_data;
|
||||
struct acpi_device *adev = NULL;
|
||||
struct acpi_device *adev = acpi_fetch_acpi_dev(dock_station->handle);
|
||||
|
||||
acpi_bus_get_device(dock_station->handle, &adev);
|
||||
return snprintf(buf, PAGE_SIZE, "%u\n", acpi_device_enumerated(adev));
|
||||
return sysfs_emit(buf, "%u\n", acpi_device_enumerated(adev));
|
||||
}
|
||||
static DEVICE_ATTR_RO(docked);
|
||||
|
||||
@ -504,7 +503,7 @@ static ssize_t flags_show(struct device *dev,
|
||||
{
|
||||
struct dock_station *dock_station = dev->platform_data;
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", dock_station->flags);
|
||||
return sysfs_emit(buf, "%d\n", dock_station->flags);
|
||||
|
||||
}
|
||||
static DEVICE_ATTR_RO(flags);
|
||||
@ -543,7 +542,7 @@ static ssize_t uid_show(struct device *dev,
|
||||
if (ACPI_FAILURE(status))
|
||||
return 0;
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%llx\n", lbuf);
|
||||
return sysfs_emit(buf, "%llx\n", lbuf);
|
||||
}
|
||||
static DEVICE_ATTR_RO(uid);
|
||||
|
||||
@ -562,7 +561,7 @@ static ssize_t type_show(struct device *dev,
|
||||
else
|
||||
type = "unknown";
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%s\n", type);
|
||||
return sysfs_emit(buf, "%s\n", type);
|
||||
}
|
||||
static DEVICE_ATTR_RO(type);
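
For context on the snprintf() to sysfs_emit() conversions above: sysfs_emit() validates that the buffer is a full sysfs page, so show() callbacks no longer pass PAGE_SIZE explicitly. A minimal, hypothetical attribute using the same pattern (not part of the dock driver):

#include <linux/device.h>
#include <linux/sysfs.h>

/* Hypothetical read-only attribute backed by a plain integer. */
static unsigned int example_value;

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* sysfs_emit() knows buf is one sysfs page; no PAGE_SIZE bookkeeping. */
	return sysfs_emit(buf, "%u\n", example_value);
}
static DEVICE_ATTR_RO(example);
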
|
||||
|
||||
|
@ -46,7 +46,7 @@ static int pch_fivr_read(acpi_handle handle, char *method, struct pch_fivr_resp
|
||||
}
|
||||
|
||||
/*
|
||||
* Presentation of attributes which are defined for INT1045
|
||||
* Presentation of attributes which are defined for INTC10xx
|
||||
* They are:
|
||||
* freq_mhz_low_clock : Set PCH FIVR switching freq for
|
||||
* FIVR clock 19.2MHz and 24MHz
|
||||
@ -151,6 +151,7 @@ static int pch_fivr_remove(struct platform_device *pdev)
|
||||
static const struct acpi_device_id pch_fivr_device_ids[] = {
|
||||
{"INTC1045", 0},
|
||||
{"INTC1049", 0},
|
||||
{"INTC10A3", 0},
|
||||
{"", 0},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(acpi, pch_fivr_device_ids);
|
||||
|
@ -231,6 +231,8 @@ static const struct acpi_device_id int3407_device_ids[] = {
|
||||
{"INTC1050", 0},
|
||||
{"INTC1060", 0},
|
||||
{"INTC1061", 0},
|
||||
{"INTC10A4", 0},
|
||||
{"INTC10A5", 0},
|
||||
{"", 0},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
|
||||
|
@ -37,6 +37,12 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
|
||||
{"INTC1050"},
|
||||
{"INTC1060"},
|
||||
{"INTC1061"},
|
||||
{"INTC10A0"},
|
||||
{"INTC10A1"},
|
||||
{"INTC10A2"},
|
||||
{"INTC10A3"},
|
||||
{"INTC10A4"},
|
||||
{"INTC10A5"},
|
||||
{""},
|
||||
};
|
||||
|
||||
|
@ -92,8 +92,6 @@ enum ec_command {
|
||||
|
||||
enum {
|
||||
EC_FLAGS_QUERY_ENABLED, /* Query is enabled */
|
||||
EC_FLAGS_QUERY_PENDING, /* Query is pending */
|
||||
EC_FLAGS_QUERY_GUARDING, /* Guard for SCI_EVT check */
|
||||
EC_FLAGS_EVENT_HANDLER_INSTALLED, /* Event handler installed */
|
||||
EC_FLAGS_EC_HANDLER_INSTALLED, /* OpReg handler installed */
|
||||
EC_FLAGS_QUERY_METHODS_INSTALLED, /* _Qxx handlers installed */
|
||||
@ -133,7 +131,7 @@ static unsigned int ec_storm_threshold __read_mostly = 8;
|
||||
module_param(ec_storm_threshold, uint, 0644);
|
||||
MODULE_PARM_DESC(ec_storm_threshold, "Maximum false GPE numbers not considered as GPE storm");
|
||||
|
||||
static bool ec_freeze_events __read_mostly = false;
|
||||
static bool ec_freeze_events __read_mostly;
|
||||
module_param(ec_freeze_events, bool, 0644);
|
||||
MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume");
|
||||
|
||||
@ -169,16 +167,15 @@ struct acpi_ec_query {
|
||||
struct acpi_ec *ec;
|
||||
};
|
||||
|
||||
static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
|
||||
static void advance_transaction(struct acpi_ec *ec, bool interrupt);
|
||||
static int acpi_ec_submit_query(struct acpi_ec *ec);
|
||||
static bool advance_transaction(struct acpi_ec *ec, bool interrupt);
|
||||
static void acpi_ec_event_handler(struct work_struct *work);
|
||||
static void acpi_ec_event_processor(struct work_struct *work);
|
||||
|
||||
struct acpi_ec *first_ec;
|
||||
EXPORT_SYMBOL(first_ec);
|
||||
|
||||
static struct acpi_ec *boot_ec;
|
||||
static bool boot_ec_is_ecdt = false;
|
||||
static bool boot_ec_is_ecdt;
|
||||
static struct workqueue_struct *ec_wq;
|
||||
static struct workqueue_struct *ec_query_wq;
|
||||
|
||||
@ -444,25 +441,51 @@ static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
|
||||
return true;
|
||||
}
|
||||
|
||||
static void acpi_ec_submit_query(struct acpi_ec *ec)
|
||||
static bool acpi_ec_submit_event(struct acpi_ec *ec)
|
||||
{
|
||||
acpi_ec_mask_events(ec);
|
||||
if (!acpi_ec_event_enabled(ec))
|
||||
return;
|
||||
if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
|
||||
return false;
|
||||
|
||||
if (ec->event_state == EC_EVENT_READY) {
|
||||
ec_dbg_evt("Command(%s) submitted/blocked",
|
||||
acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
|
||||
ec->nr_pending_queries++;
|
||||
|
||||
ec->event_state = EC_EVENT_IN_PROGRESS;
|
||||
/*
|
||||
* If events_to_process is greater than 0 at this point, the
|
||||
* while () loop in acpi_ec_event_handler() is still running
|
||||
* and incrementing events_to_process will cause it to invoke
|
||||
* acpi_ec_submit_query() once more, so it is not necessary to
|
||||
* queue up the event work to start the same loop again.
|
||||
*/
|
||||
if (ec->events_to_process++ > 0)
|
||||
return true;
|
||||
|
||||
ec->events_in_progress++;
|
||||
queue_work(ec_wq, &ec->work);
|
||||
return queue_work(ec_wq, &ec->work);
|
||||
}
|
||||
|
||||
/*
|
||||
* The event handling work has not been completed yet, so it needs to be
|
||||
* flushed.
|
||||
*/
|
||||
return true;
|
||||
}
|
||||
|
||||
static void acpi_ec_complete_query(struct acpi_ec *ec)
|
||||
static void acpi_ec_complete_event(struct acpi_ec *ec)
|
||||
{
|
||||
if (test_and_clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
|
||||
if (ec->event_state == EC_EVENT_IN_PROGRESS)
|
||||
ec->event_state = EC_EVENT_COMPLETE;
|
||||
}
|
||||
|
||||
static void acpi_ec_close_event(struct acpi_ec *ec)
|
||||
{
|
||||
if (ec->event_state != EC_EVENT_READY)
|
||||
ec_dbg_evt("Command(%s) unblocked",
|
||||
acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
|
||||
|
||||
ec->event_state = EC_EVENT_READY;
|
||||
acpi_ec_unmask_events(ec);
|
||||
}
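
The rewritten submit path relies on a common "count first, queue once" idiom: only the 0 -> 1 transition of the pending-event counter queues the work item, and a running handler drains whatever arrives in the meantime. A stripped-down, hypothetical sketch of that idiom (example_ctx and its handler are invented here for illustration; they do not exist in the driver):

#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Hypothetical device context using the same "count, then queue once" idea. */
struct example_ctx {
	spinlock_t lock;
	unsigned int events_to_process;
	struct work_struct work;
};

/* Called with ctx->lock held. */
static bool example_submit_event(struct example_ctx *ctx)
{
	/*
	 * If the handler is already draining events, bumping the counter is
	 * enough; it will pick the new event up before it exits.
	 */
	if (ctx->events_to_process++ > 0)
		return true;

	/* First pending event: kick the handler. */
	return queue_work(system_wq, &ctx->work);
}

static void example_event_handler(struct work_struct *work)
{
	struct example_ctx *ctx = container_of(work, struct example_ctx, work);

	spin_lock_irq(&ctx->lock);
	while (ctx->events_to_process) {
		spin_unlock_irq(&ctx->lock);

		/* ... process one event without holding the lock ... */

		spin_lock_irq(&ctx->lock);
		ctx->events_to_process--;
	}
	spin_unlock_irq(&ctx->lock);
}
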
|
||||
|
||||
@ -489,12 +512,10 @@ static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
|
||||
*/
|
||||
static void acpi_ec_clear(struct acpi_ec *ec)
|
||||
{
|
||||
int i, status;
|
||||
u8 value = 0;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
|
||||
status = acpi_ec_query(ec, &value);
|
||||
if (status || !value)
|
||||
if (acpi_ec_submit_query(ec))
|
||||
break;
|
||||
}
|
||||
if (unlikely(i == ACPI_EC_CLEAR_MAX))
|
||||
@ -551,8 +572,8 @@ void acpi_ec_flush_work(void)
|
||||
|
||||
static bool acpi_ec_guard_event(struct acpi_ec *ec)
|
||||
{
|
||||
bool guarded = true;
|
||||
unsigned long flags;
|
||||
bool guarded;
|
||||
|
||||
spin_lock_irqsave(&ec->lock, flags);
|
||||
/*
|
||||
@ -561,19 +582,15 @@ static bool acpi_ec_guard_event(struct acpi_ec *ec)
|
||||
* evaluating _Qxx, so we need to re-check SCI_EVT after waiting an
|
||||
* acceptable period.
|
||||
*
|
||||
* The guarding period begins when EC_FLAGS_QUERY_PENDING is
|
||||
* flagged, which means SCI_EVT check has just been performed.
|
||||
* But if the current transaction is ACPI_EC_COMMAND_QUERY, the
|
||||
* guarding should have already been performed (via
|
||||
* EC_FLAGS_QUERY_GUARDING) and should not be applied so that the
|
||||
* ACPI_EC_COMMAND_QUERY transaction can be transitioned into
|
||||
* ACPI_EC_COMMAND_POLL state immediately.
|
||||
* The guarding period is applicable if the event state is not
|
||||
* EC_EVENT_READY, but otherwise if the current transaction is of the
|
||||
* ACPI_EC_COMMAND_QUERY type, the guarding should have elapsed already
|
||||
* and it should not be applied to let the transaction transition into
|
||||
* the ACPI_EC_COMMAND_POLL state immediately.
|
||||
*/
|
||||
if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
|
||||
ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY ||
|
||||
!test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags) ||
|
||||
(ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY))
|
||||
guarded = false;
|
||||
guarded = ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
|
||||
ec->event_state != EC_EVENT_READY &&
|
||||
(!ec->curr || ec->curr->command != ACPI_EC_COMMAND_QUERY);
|
||||
spin_unlock_irqrestore(&ec->lock, flags);
|
||||
return guarded;
|
||||
}
|
||||
@ -605,16 +622,26 @@ static int ec_transaction_completed(struct acpi_ec *ec)
|
||||
static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag)
|
||||
{
|
||||
ec->curr->flags |= flag;
|
||||
if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
|
||||
if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS &&
|
||||
flag == ACPI_EC_COMMAND_POLL)
|
||||
acpi_ec_complete_query(ec);
|
||||
if (ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY &&
|
||||
flag == ACPI_EC_COMMAND_COMPLETE)
|
||||
acpi_ec_complete_query(ec);
|
||||
if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
|
||||
flag == ACPI_EC_COMMAND_COMPLETE)
|
||||
set_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
|
||||
|
||||
if (ec->curr->command != ACPI_EC_COMMAND_QUERY)
|
||||
return;
|
||||
|
||||
switch (ec_event_clearing) {
|
||||
case ACPI_EC_EVT_TIMING_STATUS:
|
||||
if (flag == ACPI_EC_COMMAND_POLL)
|
||||
acpi_ec_close_event(ec);
|
||||
|
||||
return;
|
||||
|
||||
case ACPI_EC_EVT_TIMING_QUERY:
|
||||
if (flag == ACPI_EC_COMMAND_COMPLETE)
|
||||
acpi_ec_close_event(ec);
|
||||
|
||||
return;
|
||||
|
||||
case ACPI_EC_EVT_TIMING_EVENT:
|
||||
if (flag == ACPI_EC_COMMAND_COMPLETE)
|
||||
acpi_ec_complete_event(ec);
|
||||
}
|
||||
}
|
||||
|
||||
@ -628,10 +655,11 @@ static void acpi_ec_spurious_interrupt(struct acpi_ec *ec, struct transaction *t
|
||||
acpi_ec_mask_events(ec);
|
||||
}
|
||||
|
||||
static void advance_transaction(struct acpi_ec *ec, bool interrupt)
|
||||
static bool advance_transaction(struct acpi_ec *ec, bool interrupt)
|
||||
{
|
||||
struct transaction *t = ec->curr;
|
||||
bool wakeup = false;
|
||||
bool ret = false;
|
||||
u8 status;
|
||||
|
||||
ec_dbg_stm("%s (%d)", interrupt ? "IRQ" : "TASK", smp_processor_id());
|
||||
@ -659,11 +687,9 @@ static void advance_transaction(struct acpi_ec *ec, bool interrupt)
|
||||
*/
|
||||
if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) {
|
||||
if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
|
||||
(!ec->nr_pending_queries ||
|
||||
test_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags))) {
|
||||
clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
|
||||
acpi_ec_complete_query(ec);
|
||||
}
|
||||
ec->event_state == EC_EVENT_COMPLETE)
|
||||
acpi_ec_close_event(ec);
|
||||
|
||||
if (!t)
|
||||
goto out;
|
||||
}
|
||||
@ -698,10 +724,12 @@ static void advance_transaction(struct acpi_ec *ec, bool interrupt)
|
||||
|
||||
out:
|
||||
if (status & ACPI_EC_FLAG_SCI)
|
||||
acpi_ec_submit_query(ec);
|
||||
ret = acpi_ec_submit_event(ec);
|
||||
|
||||
if (wakeup && interrupt)
|
||||
wake_up(&ec->wait);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void start_transaction(struct acpi_ec *ec)
|
||||
@ -1105,33 +1133,6 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
|
||||
|
||||
static struct acpi_ec_query *acpi_ec_create_query(struct acpi_ec *ec, u8 *pval)
|
||||
{
|
||||
struct acpi_ec_query *q;
|
||||
struct transaction *t;
|
||||
|
||||
q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL);
|
||||
if (!q)
|
||||
return NULL;
|
||||
|
||||
INIT_WORK(&q->work, acpi_ec_event_processor);
|
||||
t = &q->transaction;
|
||||
t->command = ACPI_EC_COMMAND_QUERY;
|
||||
t->rdata = pval;
|
||||
t->rlen = 1;
|
||||
q->ec = ec;
|
||||
return q;
|
||||
}
|
||||
|
||||
static void acpi_ec_delete_query(struct acpi_ec_query *q)
|
||||
{
|
||||
if (q) {
|
||||
if (q->handler)
|
||||
acpi_ec_put_query_handler(q->handler);
|
||||
kfree(q);
|
||||
}
|
||||
}
|
||||
|
||||
static void acpi_ec_event_processor(struct work_struct *work)
|
||||
{
|
||||
struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
|
||||
@ -1151,14 +1152,33 @@ static void acpi_ec_event_processor(struct work_struct *work)
|
||||
ec->queries_in_progress--;
|
||||
spin_unlock_irq(&ec->lock);
|
||||
|
||||
acpi_ec_delete_query(q);
|
||||
acpi_ec_put_query_handler(handler);
|
||||
kfree(q);
|
||||
}
|
||||
|
||||
static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
|
||||
static struct acpi_ec_query *acpi_ec_create_query(struct acpi_ec *ec, u8 *pval)
|
||||
{
|
||||
struct acpi_ec_query *q;
|
||||
struct transaction *t;
|
||||
|
||||
q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL);
|
||||
if (!q)
|
||||
return NULL;
|
||||
|
||||
INIT_WORK(&q->work, acpi_ec_event_processor);
|
||||
t = &q->transaction;
|
||||
t->command = ACPI_EC_COMMAND_QUERY;
|
||||
t->rdata = pval;
|
||||
t->rlen = 1;
|
||||
q->ec = ec;
|
||||
return q;
|
||||
}
|
||||
|
||||
static int acpi_ec_submit_query(struct acpi_ec *ec)
|
||||
{
|
||||
struct acpi_ec_query *q;
|
||||
u8 value = 0;
|
||||
int result;
|
||||
struct acpi_ec_query *q;
|
||||
|
||||
q = acpi_ec_create_query(ec, &value);
|
||||
if (!q)
|
||||
@ -1170,11 +1190,14 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
|
||||
* bit to be cleared (and thus clearing the interrupt source).
|
||||
*/
|
||||
result = acpi_ec_transaction(ec, &q->transaction);
|
||||
if (!value)
|
||||
result = -ENODATA;
|
||||
if (result)
|
||||
goto err_exit;
|
||||
|
||||
if (!value) {
|
||||
result = -ENODATA;
|
||||
goto err_exit;
|
||||
}
|
||||
|
||||
q->handler = acpi_ec_get_query_handler_by_value(ec, value);
|
||||
if (!q->handler) {
|
||||
result = -ENODATA;
|
||||
@ -1197,66 +1220,58 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
|
||||
|
||||
spin_unlock_irq(&ec->lock);
|
||||
|
||||
return 0;
|
||||
|
||||
err_exit:
|
||||
if (result)
|
||||
acpi_ec_delete_query(q);
|
||||
if (data)
|
||||
*data = value;
|
||||
kfree(q);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static void acpi_ec_check_event(struct acpi_ec *ec)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) {
|
||||
if (ec_guard(ec)) {
|
||||
spin_lock_irqsave(&ec->lock, flags);
|
||||
/*
|
||||
* Take care of the SCI_EVT unless no one else is
|
||||
* taking care of it.
|
||||
*/
|
||||
if (!ec->curr)
|
||||
advance_transaction(ec, false);
|
||||
spin_unlock_irqrestore(&ec->lock, flags);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void acpi_ec_event_handler(struct work_struct *work)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct acpi_ec *ec = container_of(work, struct acpi_ec, work);
|
||||
|
||||
ec_dbg_evt("Event started");
|
||||
|
||||
spin_lock_irqsave(&ec->lock, flags);
|
||||
while (ec->nr_pending_queries) {
|
||||
spin_unlock_irqrestore(&ec->lock, flags);
|
||||
(void)acpi_ec_query(ec, NULL);
|
||||
spin_lock_irqsave(&ec->lock, flags);
|
||||
ec->nr_pending_queries--;
|
||||
/*
|
||||
* Before exit, make sure that this work item can be
|
||||
* scheduled again. There might be QR_EC failures, leaving
|
||||
* EC_FLAGS_QUERY_PENDING uncleared and preventing this work
|
||||
* item from being scheduled again.
|
||||
*/
|
||||
if (!ec->nr_pending_queries) {
|
||||
if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
|
||||
ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY)
|
||||
acpi_ec_complete_query(ec);
|
||||
}
|
||||
spin_lock_irq(&ec->lock);
|
||||
|
||||
while (ec->events_to_process) {
|
||||
spin_unlock_irq(&ec->lock);
|
||||
|
||||
acpi_ec_submit_query(ec);
|
||||
|
||||
spin_lock_irq(&ec->lock);
|
||||
ec->events_to_process--;
|
||||
}
|
||||
spin_unlock_irqrestore(&ec->lock, flags);
|
||||
|
||||
/*
|
||||
* Before exit, make sure that it will be possible to queue up the
|
||||
* event handling work again regardless of whether or not the query
|
||||
* queued up above is processed successfully.
|
||||
*/
|
||||
if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT)
|
||||
acpi_ec_complete_event(ec);
|
||||
else
|
||||
acpi_ec_close_event(ec);
|
||||
|
||||
spin_unlock_irq(&ec->lock);
|
||||
|
||||
ec_dbg_evt("Event stopped");
|
||||
|
||||
acpi_ec_check_event(ec);
|
||||
if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT && ec_guard(ec)) {
|
||||
spin_lock_irq(&ec->lock);
|
||||
|
||||
spin_lock_irqsave(&ec->lock, flags);
|
||||
/* Take care of SCI_EVT unless someone else is doing that. */
|
||||
if (!ec->curr)
|
||||
advance_transaction(ec, false);
|
||||
|
||||
spin_unlock_irq(&ec->lock);
|
||||
}
|
||||
|
||||
spin_lock_irq(&ec->lock);
|
||||
ec->events_in_progress--;
|
||||
spin_unlock_irqrestore(&ec->lock, flags);
|
||||
spin_unlock_irq(&ec->lock);
|
||||
}
|
||||
|
||||
static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
|
||||
@ -2038,8 +2053,7 @@ void acpi_ec_set_gpe_wake_mask(u8 action)
|
||||
|
||||
bool acpi_ec_dispatch_gpe(void)
|
||||
{
|
||||
bool work_in_progress;
|
||||
u32 ret;
|
||||
bool work_in_progress = false;
|
||||
|
||||
if (!first_ec)
|
||||
return acpi_any_gpe_status_set(U32_MAX);
|
||||
@ -2051,13 +2065,31 @@ bool acpi_ec_dispatch_gpe(void)
|
||||
if (acpi_any_gpe_status_set(first_ec->gpe))
|
||||
return true;
|
||||
|
||||
/*
|
||||
* Cancel the SCI wakeup and process all pending events in case there
|
||||
* are any wakeup ones in there.
|
||||
*
|
||||
* Note that if any non-EC GPEs are active at this point, the SCI will
|
||||
* retrigger after the rearming in acpi_s2idle_wake(), so no events
|
||||
* should be missed by canceling the wakeup here.
|
||||
*/
|
||||
pm_system_cancel_wakeup();
|
||||
|
||||
/*
|
||||
* Dispatch the EC GPE in-band, but do not report wakeup in any case
|
||||
* to allow the caller to process events properly after that.
|
||||
*/
|
||||
ret = acpi_dispatch_gpe(NULL, first_ec->gpe);
|
||||
if (ret == ACPI_INTERRUPT_HANDLED)
|
||||
pm_pr_dbg("ACPI EC GPE dispatched\n");
|
||||
spin_lock_irq(&first_ec->lock);
|
||||
|
||||
if (acpi_ec_gpe_status_set(first_ec))
|
||||
work_in_progress = advance_transaction(first_ec, false);
|
||||
|
||||
spin_unlock_irq(&first_ec->lock);
|
||||
|
||||
if (!work_in_progress)
|
||||
return false;
|
||||
|
||||
pm_pr_dbg("ACPI EC GPE dispatched\n");
|
||||
|
||||
/* Drain EC work. */
|
||||
do {
|
||||
@ -2181,6 +2213,13 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
|
||||
DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Yoga 3rd"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.ident = "HP ZHAN 66 Pro",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
|
||||
DMI_MATCH(DMI_PRODUCT_FAMILY, "103C_5336AN HP ZHAN 66 Pro"),
|
||||
},
|
||||
},
|
||||
{ },
|
||||
};
|
||||
|
||||
|
@ -19,7 +19,7 @@ MODULE_DESCRIPTION("ACPI EC sysfs access driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
static bool write_support;
|
||||
module_param(write_support, bool, 0644);
|
||||
module_param_hw(write_support, bool, other, 0644);
|
||||
MODULE_PARM_DESC(write_support, "Dangerous, reboot and removal of battery may "
|
||||
"be needed.");
|
||||
|
||||
|
@ -10,4 +10,5 @@
|
||||
{"INT3404", }, /* Fan */ \
|
||||
{"INTC1044", }, /* Fan for Tiger Lake generation */ \
|
||||
{"INTC1048", }, /* Fan for Alder Lake generation */ \
|
||||
{"INTC10A2", }, /* Fan for Raptor Lake generation */ \
|
||||
{"PNP0C0B", } /* Generic ACPI fan */
|
||||
|
@ -17,6 +17,8 @@
|
||||
#include <linux/rwsem.h>
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/pci-acpi.h>
|
||||
#include <linux/platform_device.h>
|
||||
|
||||
#include "internal.h"
|
||||
@ -111,13 +113,10 @@ struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
|
||||
return NULL;
|
||||
|
||||
list_for_each_entry(adev, &parent->children, node) {
|
||||
unsigned long long addr;
|
||||
acpi_status status;
|
||||
acpi_bus_address addr = acpi_device_adr(adev);
|
||||
int score;
|
||||
|
||||
status = acpi_evaluate_integer(adev->handle, METHOD_NAME__ADR,
|
||||
NULL, &addr);
|
||||
if (ACPI_FAILURE(status) || addr != address)
|
||||
if (!adev->pnp.type.bus_address || addr != address)
|
||||
continue;
|
||||
|
||||
if (!ret) {
|
||||
@ -287,12 +286,13 @@ EXPORT_SYMBOL_GPL(acpi_unbind_one);
|
||||
|
||||
void acpi_device_notify(struct device *dev)
|
||||
{
|
||||
struct acpi_bus_type *type = acpi_get_bus_type(dev);
|
||||
struct acpi_device *adev;
|
||||
int ret;
|
||||
|
||||
ret = acpi_bind_one(dev, NULL);
|
||||
if (ret) {
|
||||
struct acpi_bus_type *type = acpi_get_bus_type(dev);
|
||||
|
||||
if (!type)
|
||||
goto err;
|
||||
|
||||
@ -304,17 +304,26 @@ void acpi_device_notify(struct device *dev)
|
||||
ret = acpi_bind_one(dev, adev);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
if (type->setup) {
|
||||
type->setup(dev);
|
||||
goto done;
|
||||
}
|
||||
} else {
|
||||
adev = ACPI_COMPANION(dev);
|
||||
|
||||
if (dev_is_pci(dev)) {
|
||||
pci_acpi_setup(dev, adev);
|
||||
goto done;
|
||||
} else if (dev_is_platform(dev)) {
|
||||
acpi_configure_pmsi_domain(dev);
|
||||
}
|
||||
}
|
||||
adev = ACPI_COMPANION(dev);
|
||||
|
||||
if (dev_is_platform(dev))
|
||||
acpi_configure_pmsi_domain(dev);
|
||||
|
||||
if (type && type->setup)
|
||||
type->setup(dev);
|
||||
else if (adev->handler && adev->handler->bind)
|
||||
if (adev->handler && adev->handler->bind)
|
||||
adev->handler->bind(dev);
|
||||
|
||||
done:
|
||||
acpi_handle_debug(ACPI_HANDLE(dev), "Bound to device %s\n",
|
||||
dev_name(dev));
|
||||
|
||||
@ -327,14 +336,12 @@ void acpi_device_notify(struct device *dev)
|
||||
void acpi_device_notify_remove(struct device *dev)
|
||||
{
|
||||
struct acpi_device *adev = ACPI_COMPANION(dev);
|
||||
struct acpi_bus_type *type;
|
||||
|
||||
if (!adev)
|
||||
return;
|
||||
|
||||
type = acpi_get_bus_type(dev);
|
||||
if (type && type->cleanup)
|
||||
type->cleanup(dev);
|
||||
if (dev_is_pci(dev))
|
||||
pci_acpi_cleanup(dev, adev);
|
||||
else if (adev->handler && adev->handler->unbind)
|
||||
adev->handler->unbind(dev);
|
||||
|
||||
|
@ -14,7 +14,7 @@
|
||||
int early_acpi_osi_init(void);
|
||||
int acpi_osi_init(void);
|
||||
acpi_status acpi_os_initialize1(void);
|
||||
int acpi_scan_init(void);
|
||||
void acpi_scan_init(void);
|
||||
#ifdef CONFIG_PCI
|
||||
void acpi_pci_root_init(void);
|
||||
void acpi_pci_link_init(void);
|
||||
@ -166,6 +166,13 @@ static inline void acpi_early_processor_osc(void) {}
|
||||
/* --------------------------------------------------------------------------
|
||||
Embedded Controller
|
||||
-------------------------------------------------------------------------- */
|
||||
|
||||
enum acpi_ec_event_state {
|
||||
EC_EVENT_READY = 0, /* Event work can be submitted */
|
||||
EC_EVENT_IN_PROGRESS, /* Event work is pending or being processed */
|
||||
EC_EVENT_COMPLETE, /* Event work processing has completed */
|
||||
};
|
||||
|
||||
struct acpi_ec {
|
||||
acpi_handle handle;
|
||||
int gpe;
|
||||
@ -182,7 +189,8 @@ struct acpi_ec {
|
||||
spinlock_t lock;
|
||||
struct work_struct work;
|
||||
unsigned long timestamp;
|
||||
unsigned long nr_pending_queries;
|
||||
enum acpi_ec_event_state event_state;
|
||||
unsigned int events_to_process;
|
||||
unsigned int events_in_progress;
|
||||
unsigned int queries_in_progress;
|
||||
bool busy_polling;
|
||||
|
@ -678,10 +678,12 @@ static const char *spa_type_name(u16 type)
|
||||
|
||||
int nfit_spa_type(struct acpi_nfit_system_address *spa)
|
||||
{
|
||||
guid_t guid;
|
||||
int i;
|
||||
|
||||
import_guid(&guid, spa->range_guid);
|
||||
for (i = 0; i < NFIT_UUID_MAX; i++)
|
||||
if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid))
|
||||
if (guid_equal(to_nfit_uuid(i), &guid))
|
||||
return i;
|
||||
return -1;
|
||||
}
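
The nfit change above avoids casting the raw, possibly unaligned range_guid bytes to a guid_t; import_guid() copies them into an aligned local first. A small, hypothetical lookup using the same pattern:

#include <linux/uuid.h>

/* Hypothetical lookup: match raw GUID bytes from firmware against known GUIDs. */
static int example_match_guid(const u8 raw[16], const guid_t *known, int count)
{
	guid_t guid;
	int i;

	/* Copy the possibly unaligned firmware bytes into an aligned guid_t. */
	import_guid(&guid, raw);

	for (i = 0; i < count; i++)
		if (guid_equal(&known[i], &guid))
			return i;

	return -1;
}
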
|
||||
|
@ -254,9 +254,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
|
||||
}
|
||||
if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
|
||||
goto out_err;
|
||||
hotpluggable = ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE;
|
||||
if (hotpluggable && !IS_ENABLED(CONFIG_MEMORY_HOTPLUG))
|
||||
goto out_err;
|
||||
hotpluggable = IS_ENABLED(CONFIG_MEMORY_HOTPLUG) &&
|
||||
(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE);
|
||||
|
||||
start = ma->base_address;
|
||||
end = start + ma->length;
|
||||
@ -298,6 +297,47 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
|
||||
out_err:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
|
||||
void *arg, const unsigned long table_end)
|
||||
{
|
||||
struct acpi_cedt_cfmws *cfmws;
|
||||
int *fake_pxm = arg;
|
||||
u64 start, end;
|
||||
int node;
|
||||
|
||||
cfmws = (struct acpi_cedt_cfmws *)header;
|
||||
start = cfmws->base_hpa;
|
||||
end = cfmws->base_hpa + cfmws->window_size;
|
||||
|
||||
/* Skip if the SRAT already described the NUMA details for this HPA */
|
||||
node = phys_to_target_node(start);
|
||||
if (node != NUMA_NO_NODE)
|
||||
return 0;
|
||||
|
||||
node = acpi_map_pxm_to_node(*fake_pxm);
|
||||
|
||||
if (node == NUMA_NO_NODE) {
|
||||
pr_err("ACPI NUMA: Too many proximity domains while processing CFMWS.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (numa_add_memblk(node, start, end) < 0) {
|
||||
/* CXL driver must handle the NUMA_NO_NODE case */
|
||||
pr_warn("ACPI NUMA: Failed to add memblk for CFMWS node %d [mem %#llx-%#llx]\n",
|
||||
node, start, end);
|
||||
}
|
||||
|
||||
/* Set the next available fake_pxm value */
|
||||
(*fake_pxm)++;
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
|
||||
void *arg, const unsigned long table_end)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif /* defined(CONFIG_X86) || defined (CONFIG_ARM64) */
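
In outline, the CFMWS handler creates a node only when the SRAT has not already described the window, drawing proximity domains from the synthetic counter supplied by the caller. A simplified standalone sketch of that decision (plain C; lookup_existing_node() and map_pxm_to_node() are invented stand-ins for phys_to_target_node() and acpi_map_pxm_to_node()):

#include <stdio.h>

#define NUMA_NO_NODE (-1)

/* Invented stand-ins for the real lookups. */
static int lookup_existing_node(unsigned long long hpa) { (void)hpa; return NUMA_NO_NODE; }
static int map_pxm_to_node(int pxm) { return pxm; }

/* Assign a node to one CXL window, consuming a fake PXM only when needed. */
static int handle_cfmws_window(unsigned long long base, unsigned long long size,
			       int *fake_pxm)
{
	int node = lookup_existing_node(base);

	if (node != NUMA_NO_NODE)
		return node;	/* SRAT already covered this range. */

	node = map_pxm_to_node((*fake_pxm)++);
	printf("window %#llx-%#llx -> fake node %d\n", base, base + size, node);
	return node;
}

int main(void)
{
	int fake_pxm = 4;	/* first PXM value the SRAT did not use */

	handle_cfmws_window(0x100000000ULL, 0x40000000ULL, &fake_pxm);
	handle_cfmws_window(0x200000000ULL, 0x40000000ULL, &fake_pxm);
	return 0;
}
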
|
||||
|
||||
static int __init acpi_parse_slit(struct acpi_table_header *table)
|
||||
@ -442,7 +482,7 @@ acpi_table_parse_srat(enum acpi_srat_type id,
|
||||
|
||||
int __init acpi_numa_init(void)
|
||||
{
|
||||
int cnt = 0;
|
||||
int i, fake_pxm, cnt = 0;
|
||||
|
||||
if (acpi_disabled)
|
||||
return -EINVAL;
|
||||
@ -478,6 +518,22 @@ int __init acpi_numa_init(void)
|
||||
/* SLIT: System Locality Information Table */
|
||||
acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
|
||||
|
||||
/*
|
||||
* CXL Fixed Memory Window Structures (CFMWS) must be parsed
|
||||
* after the SRAT. Create NUMA Nodes for CXL memory ranges that
|
||||
* are defined in the CFMWS and not already defined in the SRAT.
|
||||
* Initialize a fake_pxm as the first available PXM to emulate.
|
||||
*/
|
||||
|
||||
/* fake_pxm is the next unused PXM value after SRAT parsing */
|
||||
for (i = 0, fake_pxm = -1; i < MAX_NUMNODES - 1; i++) {
|
||||
if (node_to_pxm_map[i] > fake_pxm)
|
||||
fake_pxm = node_to_pxm_map[i];
|
||||
}
|
||||
fake_pxm++;
|
||||
acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, acpi_parse_cfmws,
|
||||
&fake_pxm);
|
||||
|
||||
if (cnt < 0)
|
||||
return cnt;
|
||||
else if (!parsed_numa_memblks)
|
||||
|
@ -606,12 +606,10 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
|
||||
int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
|
||||
int *polarity, char **name)
|
||||
{
|
||||
int result;
|
||||
struct acpi_device *device;
|
||||
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
|
||||
struct acpi_pci_link *link;
|
||||
|
||||
result = acpi_bus_get_device(handle, &device);
|
||||
if (result) {
|
||||
if (!device) {
|
||||
acpi_handle_err(handle, "Invalid link device\n");
|
||||
return -1;
|
||||
}
|
||||
@ -658,12 +656,10 @@ int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
|
||||
*/
|
||||
int acpi_pci_link_free_irq(acpi_handle handle)
|
||||
{
|
||||
struct acpi_device *device;
|
||||
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
|
||||
struct acpi_pci_link *link;
|
||||
acpi_status result;
|
||||
|
||||
result = acpi_bus_get_device(handle, &device);
|
||||
if (result) {
|
||||
if (!device) {
|
||||
acpi_handle_err(handle, "Invalid link device\n");
|
||||
return -1;
|
||||
}
|
||||
|
@ -67,11 +67,10 @@ static struct acpi_scan_handler pci_root_handler = {
|
||||
*/
|
||||
int acpi_is_root_bridge(acpi_handle handle)
|
||||
{
|
||||
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
|
||||
int ret;
|
||||
struct acpi_device *device;
|
||||
|
||||
ret = acpi_bus_get_device(handle, &device);
|
||||
if (ret)
|
||||
if (!device)
|
||||
return 0;
|
||||
|
||||
ret = acpi_match_device_ids(device, root_device_ids);
|
||||
@ -199,40 +198,26 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
|
||||
acpi_status status;
|
||||
u32 result, capbuf[3];
|
||||
|
||||
support &= OSC_PCI_SUPPORT_MASKS;
|
||||
support |= root->osc_support_set;
|
||||
|
||||
capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
|
||||
capbuf[OSC_SUPPORT_DWORD] = support;
|
||||
if (control) {
|
||||
*control &= OSC_PCI_CONTROL_MASKS;
|
||||
capbuf[OSC_CONTROL_DWORD] = *control | root->osc_control_set;
|
||||
} else {
|
||||
/* Run _OSC query only with existing controls. */
|
||||
capbuf[OSC_CONTROL_DWORD] = root->osc_control_set;
|
||||
}
|
||||
capbuf[OSC_CONTROL_DWORD] = *control | root->osc_control_set;
|
||||
|
||||
status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
|
||||
if (ACPI_SUCCESS(status)) {
|
||||
root->osc_support_set = support;
|
||||
if (control)
|
||||
*control = result;
|
||||
*control = result;
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags)
|
||||
{
|
||||
return acpi_pci_query_osc(root, flags, NULL);
|
||||
}
|
||||
|
||||
struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
|
||||
{
|
||||
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
|
||||
struct acpi_pci_root *root;
|
||||
struct acpi_device *device;
|
||||
|
||||
if (acpi_bus_get_device(handle, &device) ||
|
||||
acpi_match_device_ids(device, root_device_ids))
|
||||
if (!device || acpi_match_device_ids(device, root_device_ids))
|
||||
return NULL;
|
||||
|
||||
root = acpi_driver_data(device);
|
||||
@ -337,7 +322,7 @@ EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
|
||||
* acpi_pci_osc_control_set - Request control of PCI root _OSC features.
|
||||
* @handle: ACPI handle of a PCI root bridge (or PCIe Root Complex).
|
||||
* @mask: Mask of _OSC bits to request control of, place to store control mask.
|
||||
* @req: Mask of _OSC bits the control of is essential to the caller.
|
||||
* @support: _OSC supported capability.
|
||||
*
|
||||
* Run _OSC query for @mask and if that is successful, compare the returned
|
||||
* mask of control bits with @req. If all of the @req bits are set in the
|
||||
@ -348,8 +333,9 @@ EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
|
||||
* _OSC bits the BIOS has granted control of, but its contents are meaningless
|
||||
* on failure.
|
||||
**/
|
||||
static acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
|
||||
static acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 support)
|
||||
{
|
||||
u32 req = OSC_PCI_EXPRESS_CAPABILITY_CONTROL;
|
||||
struct acpi_pci_root *root;
|
||||
acpi_status status;
|
||||
u32 ctrl, capbuf[3];
|
||||
@ -357,22 +343,16 @@ static acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 r
|
||||
if (!mask)
|
||||
return AE_BAD_PARAMETER;
|
||||
|
||||
ctrl = *mask & OSC_PCI_CONTROL_MASKS;
|
||||
if ((ctrl & req) != req)
|
||||
return AE_TYPE;
|
||||
|
||||
root = acpi_pci_find_root(handle);
|
||||
if (!root)
|
||||
return AE_NOT_EXIST;
|
||||
|
||||
*mask = ctrl | root->osc_control_set;
|
||||
/* No need to evaluate _OSC if the control was already granted. */
|
||||
if ((root->osc_control_set & ctrl) == ctrl)
|
||||
return AE_OK;
|
||||
ctrl = *mask;
|
||||
*mask |= root->osc_control_set;
|
||||
|
||||
/* Need to check the available controls bits before requesting them. */
|
||||
while (*mask) {
|
||||
status = acpi_pci_query_osc(root, root->osc_support_set, mask);
|
||||
do {
|
||||
status = acpi_pci_query_osc(root, support, mask);
|
||||
if (ACPI_FAILURE(status))
|
||||
return status;
|
||||
if (ctrl == *mask)
|
||||
@ -380,7 +360,11 @@ static acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 r
|
||||
decode_osc_control(root, "platform does not support",
|
||||
ctrl & ~(*mask));
|
||||
ctrl = *mask;
|
||||
}
|
||||
} while (*mask);
|
||||
|
||||
/* No need to request _OSC if the control was already granted. */
|
||||
if ((root->osc_control_set & ctrl) == ctrl)
|
||||
return AE_OK;
|
||||
|
||||
if ((ctrl & req) != req) {
|
||||
decode_osc_control(root, "not requesting control; platform does not support",
|
||||
@ -399,25 +383,9 @@ static acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 r
|
||||
return AE_OK;
|
||||
}
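
The reworked function effectively runs the _OSC query to a fixed point: it keeps re-querying with the current mask until the platform stops removing bits (or nothing is left). A standalone sketch of that negotiation loop (plain C; platform_grant() is an invented stand-in for the _OSC query):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical platform: pretend it refuses to grant bit 2. */
static uint32_t platform_grant(uint32_t requested)
{
	return requested & ~(1u << 2);
}

/* Shrink the request until the platform's answer matches it exactly. */
static uint32_t negotiate_control(uint32_t requested)
{
	uint32_t ctrl = requested;
	uint32_t granted;

	do {
		granted = platform_grant(ctrl);
		if (granted == ctrl)
			break;		/* stable: everything asked for was granted */

		printf("platform does not support %#x\n", ctrl & ~granted);
		ctrl = granted;
	} while (ctrl);

	return ctrl;
}

int main(void)
{
	printf("granted control mask: %#x\n", negotiate_control(0x1f));
	return 0;
}
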
|
||||
|
||||
static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
|
||||
bool is_pcie)
|
||||
static u32 calculate_support(void)
|
||||
{
|
||||
u32 support, control, requested;
|
||||
acpi_status status;
|
||||
struct acpi_device *device = root->device;
|
||||
acpi_handle handle = device->handle;
|
||||
|
||||
/*
|
||||
* Apple always returns failure on _OSC calls when _OSI("Darwin") has
|
||||
* been called successfully. We know the feature set supported by the
|
||||
* platform, so avoid calling _OSC at all
|
||||
*/
|
||||
if (x86_apple_machine) {
|
||||
root->osc_control_set = ~OSC_PCI_EXPRESS_PME_CONTROL;
|
||||
decode_osc_control(root, "OS assumes control of",
|
||||
root->osc_control_set);
|
||||
return;
|
||||
}
|
||||
u32 support;
|
||||
|
||||
/*
|
||||
* All supported architectures that use ACPI have support for
|
||||
@ -434,30 +402,12 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
|
||||
if (IS_ENABLED(CONFIG_PCIE_EDR))
|
||||
support |= OSC_PCI_EDR_SUPPORT;
|
||||
|
||||
decode_osc_support(root, "OS supports", support);
|
||||
status = acpi_pci_osc_support(root, support);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
*no_aspm = 1;
|
||||
return support;
|
||||
}
|
||||
|
||||
/* _OSC is optional for PCI host bridges */
|
||||
if ((status == AE_NOT_FOUND) && !is_pcie)
|
||||
return;
|
||||
|
||||
dev_info(&device->dev, "_OSC: platform retains control of PCIe features (%s)\n",
|
||||
acpi_format_exception(status));
|
||||
return;
|
||||
}
|
||||
|
||||
if (pcie_ports_disabled) {
|
||||
dev_info(&device->dev, "PCIe port services disabled; not requesting _OSC control\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if ((support & ACPI_PCIE_REQ_SUPPORT) != ACPI_PCIE_REQ_SUPPORT) {
|
||||
decode_osc_support(root, "not requesting OS control; OS requires",
|
||||
ACPI_PCIE_REQ_SUPPORT);
|
||||
return;
|
||||
}
|
||||
static u32 calculate_control(void)
|
||||
{
|
||||
u32 control;
|
||||
|
||||
control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL
|
||||
| OSC_PCI_EXPRESS_PME_CONTROL;
|
||||
@ -483,11 +433,59 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
|
||||
if (IS_ENABLED(CONFIG_PCIE_DPC) && IS_ENABLED(CONFIG_PCIE_EDR))
|
||||
control |= OSC_PCI_EXPRESS_DPC_CONTROL;
|
||||
|
||||
requested = control;
|
||||
status = acpi_pci_osc_control_set(handle, &control,
|
||||
OSC_PCI_EXPRESS_CAPABILITY_CONTROL);
|
||||
return control;
|
||||
}
|
||||
|
||||
static bool os_control_query_checks(struct acpi_pci_root *root, u32 support)
|
||||
{
|
||||
struct acpi_device *device = root->device;
|
||||
|
||||
if (pcie_ports_disabled) {
|
||||
dev_info(&device->dev, "PCIe port services disabled; not requesting _OSC control\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
if ((support & ACPI_PCIE_REQ_SUPPORT) != ACPI_PCIE_REQ_SUPPORT) {
|
||||
decode_osc_support(root, "not requesting OS control; OS requires",
|
||||
ACPI_PCIE_REQ_SUPPORT);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
|
||||
bool is_pcie)
|
||||
{
|
||||
u32 support, control = 0, requested = 0;
|
||||
acpi_status status;
|
||||
struct acpi_device *device = root->device;
|
||||
acpi_handle handle = device->handle;
|
||||
|
||||
/*
|
||||
* Apple always returns failure on _OSC calls when _OSI("Darwin") has
|
||||
* been called successfully. We know the feature set supported by the
|
||||
* platform, so avoid calling _OSC at all
|
||||
*/
|
||||
if (x86_apple_machine) {
|
||||
root->osc_control_set = ~OSC_PCI_EXPRESS_PME_CONTROL;
|
||||
decode_osc_control(root, "OS assumes control of",
|
||||
root->osc_control_set);
|
||||
return;
|
||||
}
|
||||
|
||||
support = calculate_support();
|
||||
|
||||
decode_osc_support(root, "OS supports", support);
|
||||
|
||||
if (os_control_query_checks(root, support))
|
||||
requested = control = calculate_control();
|
||||
|
||||
status = acpi_pci_osc_control_set(handle, &control, support);
|
||||
if (ACPI_SUCCESS(status)) {
|
||||
decode_osc_control(root, "OS now controls", control);
|
||||
if (control)
|
||||
decode_osc_control(root, "OS now controls", control);
|
||||
|
||||
if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
|
||||
/*
|
||||
* We have ASPM control, but the FADT indicates that
|
||||
@ -498,11 +496,6 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
|
||||
*no_aspm = 1;
|
||||
}
|
||||
} else {
|
||||
decode_osc_control(root, "OS requested", requested);
|
||||
decode_osc_control(root, "platform willing to grant", control);
|
||||
dev_info(&device->dev, "_OSC: platform retains control of PCIe features (%s)\n",
|
||||
acpi_format_exception(status));
|
||||
|
||||
/*
|
||||
* We want to disable ASPM here, but aspm_disabled
|
||||
* needs to remain in its state from boot so that we
|
||||
@ -511,6 +504,18 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
|
||||
* root scan.
|
||||
*/
|
||||
*no_aspm = 1;
|
||||
|
||||
/* _OSC is optional for PCI host bridges */
|
||||
if ((status == AE_NOT_FOUND) && !is_pcie)
|
||||
return;
|
||||
|
||||
if (control) {
|
||||
decode_osc_control(root, "OS requested", requested);
|
||||
decode_osc_control(root, "platform willing to grant", control);
|
||||
}
|
||||
|
||||
dev_info(&device->dev, "_OSC: platform retains control of PCIe features (%s)\n",
|
||||
acpi_format_exception(status));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -25,7 +25,7 @@ struct intel_pmic_opregion {
|
||||
struct mutex lock;
|
||||
struct acpi_lpat_conversion_table *lpat_table;
|
||||
struct regmap *regmap;
|
||||
struct intel_pmic_opregion_data *data;
|
||||
const struct intel_pmic_opregion_data *data;
|
||||
struct intel_pmic_regs_handler_ctx ctx;
|
||||
};
|
||||
|
||||
@ -53,7 +53,7 @@ static acpi_status intel_pmic_power_handler(u32 function,
|
||||
{
|
||||
struct intel_pmic_opregion *opregion = region_context;
|
||||
struct regmap *regmap = opregion->regmap;
|
||||
struct intel_pmic_opregion_data *d = opregion->data;
|
||||
const struct intel_pmic_opregion_data *d = opregion->data;
|
||||
int reg, bit, result;
|
||||
|
||||
if (bits != 32 || !value64)
|
||||
@ -95,7 +95,7 @@ static int pmic_read_temp(struct intel_pmic_opregion *opregion,
|
||||
return 0;
|
||||
}
|
||||
|
||||
temp = acpi_lpat_raw_to_temp(opregion->lpat_table, raw_temp);
|
||||
temp = opregion->data->lpat_raw_to_temp(opregion->lpat_table, raw_temp);
|
||||
if (temp < 0)
|
||||
return temp;
|
||||
|
||||
@ -135,7 +135,7 @@ static int pmic_thermal_aux(struct intel_pmic_opregion *opregion, int reg,
|
||||
static int pmic_thermal_pen(struct intel_pmic_opregion *opregion, int reg,
|
||||
int bit, u32 function, u64 *value)
|
||||
{
|
||||
struct intel_pmic_opregion_data *d = opregion->data;
|
||||
const struct intel_pmic_opregion_data *d = opregion->data;
|
||||
struct regmap *regmap = opregion->regmap;
|
||||
|
||||
if (!d->get_policy || !d->update_policy)
|
||||
@ -171,7 +171,7 @@ static acpi_status intel_pmic_thermal_handler(u32 function,
|
||||
void *handler_context, void *region_context)
|
||||
{
|
||||
struct intel_pmic_opregion *opregion = region_context;
|
||||
struct intel_pmic_opregion_data *d = opregion->data;
|
||||
const struct intel_pmic_opregion_data *d = opregion->data;
|
||||
int reg, bit, result;
|
||||
|
||||
if (bits != 32 || !value64)
|
||||
@ -255,7 +255,7 @@ static acpi_status intel_pmic_regs_handler(u32 function,
|
||||
|
||||
int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
|
||||
struct regmap *regmap,
|
||||
struct intel_pmic_opregion_data *d)
|
||||
const struct intel_pmic_opregion_data *d)
|
||||
{
|
||||
acpi_status status = AE_OK;
|
||||
struct intel_pmic_opregion *opregion;
|
||||
@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(intel_pmic_install_opregion_handler);
|
||||
int intel_soc_pmic_exec_mipi_pmic_seq_element(u16 i2c_address, u32 reg_address,
|
||||
u32 value, u32 mask)
|
||||
{
|
||||
struct intel_pmic_opregion_data *d;
|
||||
const struct intel_pmic_opregion_data *d;
|
||||
int ret;
|
||||
|
||||
if (!intel_pmic_opregion) {
|
||||
|
@ -2,6 +2,8 @@
|
||||
#ifndef __INTEL_PMIC_H
|
||||
#define __INTEL_PMIC_H
|
||||
|
||||
#include <acpi/acpi_lpat.h>
|
||||
|
||||
struct pmic_table {
|
||||
int address; /* operation region address */
|
||||
int reg; /* corresponding thermal register */
|
||||
@ -17,6 +19,8 @@ struct intel_pmic_opregion_data {
|
||||
int (*update_policy)(struct regmap *r, int reg, int bit, int enable);
|
||||
int (*exec_mipi_pmic_seq_element)(struct regmap *r, u16 i2c_address,
|
||||
u32 reg_address, u32 value, u32 mask);
|
||||
int (*lpat_raw_to_temp)(struct acpi_lpat_conversion_table *lpat_table,
|
||||
int raw);
|
||||
struct pmic_table *power_table;
|
||||
int power_table_count;
|
||||
struct pmic_table *thermal_table;
|
||||
@ -25,6 +29,8 @@ struct intel_pmic_opregion_data {
|
||||
int pmic_i2c_address;
|
||||
};
|
||||
|
||||
int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle, struct regmap *regmap, struct intel_pmic_opregion_data *d);
|
||||
int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
|
||||
struct regmap *regmap,
|
||||
const struct intel_pmic_opregion_data *d);
|
||||
|
||||
#endif
|
||||
|
@ -369,13 +369,14 @@ intel_bxtwc_pmic_update_policy(struct regmap *regmap,
|
||||
return regmap_update_bits(regmap, reg, mask, val);
|
||||
}
|
||||
|
||||
static struct intel_pmic_opregion_data intel_bxtwc_pmic_opregion_data = {
|
||||
static const struct intel_pmic_opregion_data intel_bxtwc_pmic_opregion_data = {
|
||||
.get_power = intel_bxtwc_pmic_get_power,
|
||||
.update_power = intel_bxtwc_pmic_update_power,
|
||||
.get_raw_temp = intel_bxtwc_pmic_get_raw_temp,
|
||||
.update_aux = intel_bxtwc_pmic_update_aux,
|
||||
.get_policy = intel_bxtwc_pmic_get_policy,
|
||||
.update_policy = intel_bxtwc_pmic_update_policy,
|
||||
.lpat_raw_to_temp = acpi_lpat_raw_to_temp,
|
||||
.power_table = power_table,
|
||||
.power_table_count = ARRAY_SIZE(power_table),
|
||||
.thermal_table = thermal_table,
|
||||
|
@ -271,13 +271,14 @@ static int intel_crc_pmic_update_policy(struct regmap *regmap,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct intel_pmic_opregion_data intel_crc_pmic_opregion_data = {
|
||||
static const struct intel_pmic_opregion_data intel_crc_pmic_opregion_data = {
|
||||
.get_power = intel_crc_pmic_get_power,
|
||||
.update_power = intel_crc_pmic_update_power,
|
||||
.get_raw_temp = intel_crc_pmic_get_raw_temp,
|
||||
.update_aux = intel_crc_pmic_update_aux,
|
||||
.get_policy = intel_crc_pmic_get_policy,
|
||||
.update_policy = intel_crc_pmic_update_policy,
|
||||
.lpat_raw_to_temp = acpi_lpat_raw_to_temp,
|
||||
.power_table = power_table,
|
||||
.power_table_count= ARRAY_SIZE(power_table),
|
||||
.thermal_table = thermal_table,
|
||||
|
@ -23,7 +23,8 @@
|
||||
* intel_soc_pmic_exec_mipi_pmic_seq_element work on devices with a
|
||||
* CHT Crystal Cove PMIC.
|
||||
*/
|
||||
static struct intel_pmic_opregion_data intel_chtcrc_pmic_opregion_data = {
|
||||
static const struct intel_pmic_opregion_data intel_chtcrc_pmic_opregion_data = {
|
||||
.lpat_raw_to_temp = acpi_lpat_raw_to_temp,
|
||||
.pmic_i2c_address = 0x6e,
|
||||
};
|
||||
|
||||
|
@ -94,10 +94,11 @@ static int chtdc_ti_pmic_get_raw_temp(struct regmap *regmap, int reg)
|
||||
return ((buf[0] & 0x03) << 8) | buf[1];
|
||||
}
|
||||
|
||||
static struct intel_pmic_opregion_data chtdc_ti_pmic_opregion_data = {
|
||||
static const struct intel_pmic_opregion_data chtdc_ti_pmic_opregion_data = {
|
||||
.get_power = chtdc_ti_pmic_get_power,
|
||||
.update_power = chtdc_ti_pmic_update_power,
|
||||
.get_raw_temp = chtdc_ti_pmic_get_raw_temp,
|
||||
.lpat_raw_to_temp = acpi_lpat_raw_to_temp,
|
||||
.power_table = chtdc_ti_power_table,
|
||||
.power_table_count = ARRAY_SIZE(chtdc_ti_power_table),
|
||||
.thermal_table = chtdc_ti_thermal_table,
|
||||
|
@ -253,10 +253,11 @@ static int intel_cht_wc_exec_mipi_pmic_seq_element(struct regmap *regmap,
|
||||
* The thermal table and ops are empty, we do not support the Thermal opregion
|
||||
* (DPTF) due to lacking documentation.
|
||||
*/
|
||||
static struct intel_pmic_opregion_data intel_cht_wc_pmic_opregion_data = {
|
||||
static const struct intel_pmic_opregion_data intel_cht_wc_pmic_opregion_data = {
|
||||
.get_power = intel_cht_wc_pmic_get_power,
|
||||
.update_power = intel_cht_wc_pmic_update_power,
|
||||
.exec_mipi_pmic_seq_element = intel_cht_wc_exec_mipi_pmic_seq_element,
|
||||
.lpat_raw_to_temp = acpi_lpat_raw_to_temp,
|
||||
.power_table = power_table,
|
||||
.power_table_count = ARRAY_SIZE(power_table),
|
||||
};
|
||||
|
@ -293,11 +293,33 @@ static int intel_xpower_exec_mipi_pmic_seq_element(struct regmap *regmap,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct intel_pmic_opregion_data intel_xpower_pmic_opregion_data = {
|
||||
static int intel_xpower_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table,
|
||||
int raw)
|
||||
{
|
||||
struct acpi_lpat first = lpat_table->lpat[0];
|
||||
struct acpi_lpat last = lpat_table->lpat[lpat_table->lpat_count - 1];
|
||||
|
||||
/*
|
||||
* Some LPAT tables in the ACPI Device for the AXP288 PMIC for some
|
||||
* reason only describe a small temperature range, e.g. 27° - 37°
|
||||
* Celsius, resulting in errors when the tablet is idle in a cool room.
|
||||
*
|
||||
* To avoid these errors clamp the raw value to be inside the LPAT.
|
||||
*/
|
||||
if (first.raw < last.raw)
|
||||
raw = clamp(raw, first.raw, last.raw);
|
||||
else
|
||||
raw = clamp(raw, last.raw, first.raw);
|
||||
|
||||
return acpi_lpat_raw_to_temp(lpat_table, raw);
|
||||
}
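
A standalone illustration of the clamping idea used above (plain C; the kernel version uses the clamp() macro from linux/minmax.h and the real LPAT table): the raw reading is forced into the range spanned by the first and last table entries, whichever way the table is ordered, so out-of-range samples degrade to the table end points instead of producing an error.

#include <stdio.h>

#define CLAMP(v, lo, hi) ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

/* Hypothetical two-point table: raw ADC codes for the min/max temperatures. */
struct lpat_entry { int temp; int raw; };

static int clamp_raw_to_table(int raw, struct lpat_entry first, struct lpat_entry last)
{
	if (first.raw < last.raw)
		return CLAMP(raw, first.raw, last.raw);
	return CLAMP(raw, last.raw, first.raw);
}

int main(void)
{
	struct lpat_entry first = { .temp = 2731, .raw = 0x300 };
	struct lpat_entry last  = { .temp = 3731, .raw = 0x100 };	/* descending table */

	printf("clamped: %#x\n", clamp_raw_to_table(0x380, first, last));
	return 0;
}
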
|
||||
|
||||
static const struct intel_pmic_opregion_data intel_xpower_pmic_opregion_data = {
|
||||
.get_power = intel_xpower_pmic_get_power,
|
||||
.update_power = intel_xpower_pmic_update_power,
|
||||
.get_raw_temp = intel_xpower_pmic_get_raw_temp,
|
||||
.exec_mipi_pmic_seq_element = intel_xpower_exec_mipi_pmic_seq_element,
|
||||
.lpat_raw_to_temp = intel_xpower_lpat_raw_to_temp,
|
||||
.power_table = power_table,
|
||||
.power_table_count = ARRAY_SIZE(power_table),
|
||||
.thermal_table = thermal_table,
|
||||
|
@ -81,9 +81,9 @@ struct acpi_power_resource *to_power_resource(struct acpi_device *device)
|
||||
|
||||
static struct acpi_power_resource *acpi_power_get_context(acpi_handle handle)
|
||||
{
|
||||
struct acpi_device *device;
|
||||
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
|
||||
|
||||
if (acpi_bus_get_device(handle, &device))
|
||||
if (!device)
|
||||
return NULL;
|
||||
|
||||
return to_power_resource(device);
|
||||
@ -716,6 +716,9 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
|
||||
|
||||
mutex_lock(&acpi_device_lock);
|
||||
|
||||
dev_dbg(&dev->dev, "Enabling wakeup power (count %d)\n",
|
||||
dev->wakeup.prepare_count);
|
||||
|
||||
if (dev->wakeup.prepare_count++)
|
||||
goto out;
|
||||
|
||||
@ -731,8 +734,13 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
|
||||
* put into arbitrary power state afterward.
|
||||
*/
|
||||
err = acpi_device_sleep_wake(dev, 1, sleep_state, 3);
|
||||
if (err)
|
||||
if (err) {
|
||||
acpi_power_off_list(&dev->wakeup.resources);
|
||||
dev->wakeup.prepare_count = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
dev_dbg(&dev->dev, "Wakeup power enabled\n");
|
||||
|
||||
out:
|
||||
mutex_unlock(&acpi_device_lock);
|
||||
@ -755,6 +763,9 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
|
||||
|
||||
mutex_lock(&acpi_device_lock);
|
||||
|
||||
dev_dbg(&dev->dev, "Disabling wakeup power (count %d)\n",
|
||||
dev->wakeup.prepare_count);
|
||||
|
||||
/* Do nothing if wakeup power has not been enabled for this device. */
|
||||
if (dev->wakeup.prepare_count <= 0)
|
||||
goto out;
|
||||
@ -780,8 +791,11 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
|
||||
if (err) {
|
||||
dev_err(&dev->dev, "Cannot turn off wakeup power resources\n");
|
||||
dev->wakeup.flags.valid = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
dev_dbg(&dev->dev, "Wakeup power disabled\n");
|
||||
|
||||
out:
|
||||
mutex_unlock(&acpi_device_lock);
|
||||
return err;
|
||||
@ -914,14 +928,14 @@ static void acpi_power_add_resource_to_list(struct acpi_power_resource *resource
|
||||
|
||||
struct acpi_device *acpi_add_power_resource(acpi_handle handle)
|
||||
{
|
||||
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
|
||||
struct acpi_power_resource *resource;
|
||||
struct acpi_device *device = NULL;
|
||||
union acpi_object acpi_object;
|
||||
struct acpi_buffer buffer = { sizeof(acpi_object), &acpi_object };
|
||||
acpi_status status;
|
||||
u8 state_dummy;
|
||||
int result;
|
||||
|
||||
acpi_bus_get_device(handle, &device);
|
||||
if (device)
|
||||
return device;
|
||||
|
||||
@ -947,6 +961,10 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle)
|
||||
resource->order = acpi_object.power_resource.resource_order;
|
||||
resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN;
|
||||
|
||||
/* Get the initial state or just flip it on if that fails. */
|
||||
if (acpi_power_get_state(resource, &state_dummy))
|
||||
__acpi_power_on(resource);
|
||||
|
||||
pr_info("%s [%s]\n", acpi_device_name(device), acpi_device_bid(device));
|
||||
|
||||
device->flags.match_driver = true;
|
||||
|
@ -746,6 +746,73 @@ int find_acpi_cpu_topology_package(unsigned int cpu)
|
||||
ACPI_PPTT_PHYSICAL_PACKAGE);
|
||||
}
|
||||
|
||||
/**
|
||||
* find_acpi_cpu_topology_cluster() - Determine a unique CPU cluster value
|
||||
* @cpu: Kernel logical CPU number
|
||||
*
|
||||
* Determine a topology unique cluster ID for the given CPU/thread.
|
||||
* This ID can then be used to group peers, which will have matching ids.
|
||||
*
|
||||
* The cluster, if present, is the level of topology above CPUs. In a
|
||||
* multi-thread CPU, it will be the level above the CPU, not the thread.
|
||||
* It may not exist in single CPU systems. In simple multi-CPU systems,
|
||||
* it may be equal to the package topology level.
|
||||
*
|
||||
* Return: -ENOENT if the PPTT doesn't exist, the CPU cannot be found
|
||||
* or there is no topology level above the CPU.
|
||||
* Otherwise returns a value which represents the cluster for this CPU.
|
||||
*/
|
||||
|
||||
int find_acpi_cpu_topology_cluster(unsigned int cpu)
|
||||
{
|
||||
struct acpi_table_header *table;
|
||||
acpi_status status;
|
||||
struct acpi_pptt_processor *cpu_node, *cluster_node;
|
||||
u32 acpi_cpu_id;
|
||||
int retval;
|
||||
int is_thread;
|
||||
|
||||
status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
acpi_pptt_warn_missing();
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
acpi_cpu_id = get_acpi_id_for_cpu(cpu);
|
||||
cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
|
||||
if (cpu_node == NULL || !cpu_node->parent) {
|
||||
retval = -ENOENT;
|
||||
goto put_table;
|
||||
}
|
||||
|
||||
is_thread = cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD;
|
||||
cluster_node = fetch_pptt_node(table, cpu_node->parent);
|
||||
if (cluster_node == NULL) {
|
||||
retval = -ENOENT;
|
||||
goto put_table;
|
||||
}
|
||||
if (is_thread) {
|
||||
if (!cluster_node->parent) {
|
||||
retval = -ENOENT;
|
||||
goto put_table;
|
||||
}
|
||||
cluster_node = fetch_pptt_node(table, cluster_node->parent);
|
||||
if (cluster_node == NULL) {
|
||||
retval = -ENOENT;
|
||||
goto put_table;
|
||||
}
|
||||
}
|
||||
if (cluster_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID)
|
||||
retval = cluster_node->acpi_processor_id;
|
||||
else
|
||||
retval = ACPI_PTR_DIFF(cluster_node, table);
|
||||
|
||||
put_table:
|
||||
acpi_put_table(table);
|
||||
|
||||
return retval;
|
||||
}
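
Reduced to its core, the cluster lookup is a short walk up a parent-linked tree: start at the CPU node, step to its parent, and step once more if the starting node was a hardware thread. A standalone sketch with an invented node type (not the real struct acpi_pptt_processor):

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-in for the PPTT processor node. */
struct topo_node {
	int id;
	bool is_thread;
	struct topo_node *parent;
};

/* Return the cluster id for @cpu, or -1 if there is no level above it. */
static int cluster_id(const struct topo_node *cpu)
{
	const struct topo_node *node = cpu->parent;

	if (!node)
		return -1;
	if (cpu->is_thread) {		/* skip the core level for SMT threads */
		node = node->parent;
		if (!node)
			return -1;
	}
	return node->id;
}

int main(void)
{
	struct topo_node cluster = { .id = 7 };
	struct topo_node core = { .id = 3, .parent = &cluster };
	struct topo_node thread = { .id = 1, .is_thread = true, .parent = &core };

	printf("cluster of thread 1: %d\n", cluster_id(&thread));
	return 0;
}
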
|
||||
|
||||
/**
|
||||
* find_acpi_cpu_topology_hetero_id() - Get a core architecture tag
|
||||
* @cpu: Kernel logical CPU number
|
||||
|
@ -49,7 +49,6 @@ struct prm_context_buffer {
|
||||
};
|
||||
#pragma pack()
|
||||
|
||||
|
||||
static LIST_HEAD(prm_module_list);
|
||||
|
||||
struct prm_handler_info {
|
||||
@ -73,7 +72,6 @@ struct prm_module_info {
|
||||
struct prm_handler_info handlers[];
|
||||
};
|
||||
|
||||
|
||||
static u64 efi_pa_va_lookup(u64 pa)
|
||||
{
|
||||
efi_memory_desc_t *md;
|
||||
@ -88,7 +86,6 @@ static u64 efi_pa_va_lookup(u64 pa)
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
#define get_first_handler(a) ((struct acpi_prmt_handler_info *) ((char *) (a) + a->handler_info_offset))
|
||||
#define get_next_handler(a) ((struct acpi_prmt_handler_info *) (sizeof(struct acpi_prmt_handler_info) + (char *) a))
|
||||
|
||||
@ -99,7 +96,7 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
|
||||
struct acpi_prmt_handler_info *handler_info;
|
||||
struct prm_handler_info *th;
|
||||
struct prm_module_info *tm;
|
||||
u64 mmio_count = 0;
|
||||
u64 *mmio_count;
|
||||
u64 cur_handler = 0;
|
||||
u32 module_info_size = 0;
|
||||
u64 mmio_range_size = 0;
|
||||
@ -108,6 +105,8 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
|
||||
module_info = (struct acpi_prmt_module_info *) header;
|
||||
module_info_size = struct_size(tm, handlers, module_info->handler_info_count);
|
||||
tm = kmalloc(module_info_size, GFP_KERNEL);
|
||||
if (!tm)
|
||||
goto parse_prmt_out1;
|
||||
|
||||
guid_copy(&tm->guid, (guid_t *) module_info->module_guid);
|
||||
tm->major_rev = module_info->major_rev;
|
||||
@ -120,14 +119,24 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
|
||||
* Each module is associated with a list of addr
|
||||
* ranges that it can use during the service
|
||||
*/
|
||||
mmio_count = *(u64 *) memremap(module_info->mmio_list_pointer, 8, MEMREMAP_WB);
|
||||
mmio_range_size = struct_size(tm->mmio_info, addr_ranges, mmio_count);
|
||||
mmio_count = (u64 *) memremap(module_info->mmio_list_pointer, 8, MEMREMAP_WB);
|
||||
if (!mmio_count)
|
||||
goto parse_prmt_out2;
|
||||
|
||||
mmio_range_size = struct_size(tm->mmio_info, addr_ranges, *mmio_count);
|
||||
tm->mmio_info = kmalloc(mmio_range_size, GFP_KERNEL);
|
||||
if (!tm->mmio_info)
|
||||
goto parse_prmt_out3;
|
||||
|
||||
temp_mmio = memremap(module_info->mmio_list_pointer, mmio_range_size, MEMREMAP_WB);
|
||||
if (!temp_mmio)
|
||||
goto parse_prmt_out4;
|
||||
memmove(tm->mmio_info, temp_mmio, mmio_range_size);
|
||||
} else {
|
||||
mmio_range_size = struct_size(tm->mmio_info, addr_ranges, mmio_count);
|
||||
tm->mmio_info = kmalloc(mmio_range_size, GFP_KERNEL);
|
||||
tm->mmio_info = kmalloc(sizeof(*tm->mmio_info), GFP_KERNEL);
|
||||
if (!tm->mmio_info)
|
||||
goto parse_prmt_out2;
|
||||
|
||||
tm->mmio_info->mmio_count = 0;
|
||||
}
|
||||
|
||||
@ -145,6 +154,15 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
|
||||
} while (++cur_handler < tm->handler_count && (handler_info = get_next_handler(handler_info)));
|
||||
|
||||
return 0;
|
||||
|
||||
parse_prmt_out4:
|
||||
kfree(tm->mmio_info);
|
||||
parse_prmt_out3:
|
||||
memunmap(mmio_count);
|
||||
parse_prmt_out2:
|
||||
kfree(tm);
|
||||
parse_prmt_out1:
|
||||
return -ENOMEM;
|
||||
}
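
The added labels follow the usual kernel convention for multi-step setup: each failure point jumps to a label that undoes, in reverse order, exactly the steps that already succeeded. A minimal standalone sketch of the same structure (plain C, with malloc()/free() standing in for kmalloc() and memremap()):

#include <stdlib.h>

/* Hypothetical three-step setup with reverse-order cleanup on failure. */
static int example_setup(void **out_a, void **out_b, void **out_c)
{
	void *a, *b, *c;

	a = malloc(64);
	if (!a)
		goto out1;

	b = malloc(64);			/* stands in for memremap() */
	if (!b)
		goto out2;

	c = malloc(64);
	if (!c)
		goto out3;

	*out_a = a;
	*out_b = b;
	*out_c = c;
	return 0;

out3:
	free(b);			/* undo step 2 */
out2:
	free(a);			/* undo step 1 */
out1:
	return -1;
}

int main(void)
{
	void *a, *b, *c;

	return example_setup(&a, &b, &c);
}
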
|
||||
|
||||
#define GET_MODULE 0
|
||||
@ -171,7 +189,6 @@ static void *find_guid_info(const guid_t *guid, u8 mode)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
static struct prm_module_info *find_prm_module(const guid_t *guid)
|
||||
{
|
||||
return (struct prm_module_info *)find_guid_info(guid, GET_MODULE);
|
||||
|
@ -98,8 +98,13 @@ static int acpi_soft_cpu_online(unsigned int cpu)
|
||||
struct acpi_processor *pr = per_cpu(processors, cpu);
|
||||
struct acpi_device *device;
|
||||
|
||||
if (!pr || acpi_bus_get_device(pr->handle, &device))
|
||||
if (!pr)
|
||||
return 0;
|
||||
|
||||
device = acpi_fetch_acpi_dev(pr->handle);
|
||||
if (!device)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* CPU got physically hotplugged and onlined for the first time:
|
||||
* Initialize missing things.
|
||||
@ -125,9 +130,8 @@ static int acpi_soft_cpu_online(unsigned int cpu)
|
||||
static int acpi_soft_cpu_dead(unsigned int cpu)
|
||||
{
|
||||
struct acpi_processor *pr = per_cpu(processors, cpu);
|
||||
struct acpi_device *device;
|
||||
|
||||
if (!pr || acpi_bus_get_device(pr->handle, &device))
|
||||
if (!pr || !acpi_fetch_acpi_dev(pr->handle))
|
||||
return 0;
|
||||
|
||||
acpi_processor_reevaluate_tstate(pr, true);
|
||||
|
@ -20,6 +20,7 @@
|
||||
#include <linux/tick.h>
|
||||
#include <linux/cpuidle.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/minmax.h>
|
||||
#include <acpi/processor.h>
|
||||
|
||||
/*
|
||||
@ -95,6 +96,11 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
|
||||
(void *)1},
|
||||
/* T40 can not handle C3 idle state */
|
||||
{ set_max_cstate, "IBM ThinkPad T40", {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "23737CU")},
|
||||
(void *)2},
|
||||
{},
|
||||
};
|
||||
|
||||
@ -400,13 +406,10 @@ static int acpi_cst_latency_cmp(const void *a, const void *b)
|
||||
static void acpi_cst_latency_swap(void *a, void *b, int n)
|
||||
{
|
||||
struct acpi_processor_cx *x = a, *y = b;
|
||||
u32 tmp;
|
||||
|
||||
if (!(x->valid && y->valid))
|
||||
return;
|
||||
tmp = x->latency;
|
||||
x->latency = y->latency;
|
||||
y->latency = tmp;
|
||||
swap(x->latency, y->latency);
|
||||
}
|
||||
|
||||
static int acpi_processor_power_verify(struct acpi_processor *pr)
|
||||
@ -567,7 +570,8 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
|
||||
{
|
||||
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
|
||||
|
||||
ACPI_FLUSH_CPU_CACHE();
|
||||
if (cx->type == ACPI_STATE_C3)
|
||||
ACPI_FLUSH_CPU_CACHE();
|
||||
|
||||
while (1) {
|
||||
|
||||
@ -789,7 +793,8 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
|
||||
state->enter = acpi_idle_enter;
|
||||
|
||||
state->flags = 0;
|
||||
if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
|
||||
if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 ||
|
||||
cx->type == ACPI_STATE_C3) {
|
||||
state->enter_dead = acpi_idle_play_dead;
|
||||
drv->safe_state_index = count;
|
||||
}
|
||||
@ -1100,7 +1105,7 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
|
||||
|
||||
status = acpi_get_parent(handle, &pr_ahandle);
|
||||
while (ACPI_SUCCESS(status)) {
|
||||
acpi_bus_get_device(pr_ahandle, &d);
|
||||
d = acpi_fetch_acpi_dev(pr_ahandle);
|
||||
handle = pr_ahandle;
|
||||
|
||||
if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
|
||||
|
@ -53,10 +53,17 @@ static int phys_package_first_cpu(int cpu)
|
||||
|
||||
static int cpu_has_cpufreq(unsigned int cpu)
|
||||
{
|
||||
struct cpufreq_policy policy;
|
||||
if (!acpi_processor_cpufreq_init || cpufreq_get_policy(&policy, cpu))
|
||||
struct cpufreq_policy *policy;
|
||||
|
||||
if (!acpi_processor_cpufreq_init)
|
||||
return 0;
|
||||
return 1;
|
||||
|
||||
policy = cpufreq_cpu_get(cpu);
|
||||
if (policy) {
|
||||
cpufreq_cpu_put(policy);
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cpufreq_get_max_state(unsigned int cpu)
|
||||
|
@ -687,9 +687,9 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
|
||||
if (index)
|
||||
return -EINVAL;
|
||||
|
||||
ret = acpi_bus_get_device(obj->reference.handle, &device);
|
||||
if (ret)
|
||||
return ret == -ENODEV ? -EINVAL : ret;
|
||||
device = acpi_fetch_acpi_dev(obj->reference.handle);
|
||||
if (!device)
|
||||
return -EINVAL;
|
||||
|
||||
args->fwnode = acpi_fwnode_handle(device);
|
||||
args->nargs = 0;
|
||||
@ -719,9 +719,8 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
|
||||
if (element->type == ACPI_TYPE_LOCAL_REFERENCE) {
|
||||
struct fwnode_handle *ref_fwnode;
|
||||
|
||||
ret = acpi_bus_get_device(element->reference.handle,
|
||||
&device);
|
||||
if (ret)
|
||||
device = acpi_fetch_acpi_dev(element->reference.handle);
|
||||
if (!device)
|
||||
return -EINVAL;
|
||||
|
||||
nargs = 0;
|
||||
@ -1084,7 +1083,8 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
|
||||
* Returns parent node of an ACPI device or data firmware node or %NULL if
|
||||
* not available.
|
||||
*/
|
||||
struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode)
|
||||
static struct fwnode_handle *
|
||||
acpi_node_get_parent(const struct fwnode_handle *fwnode)
|
||||
{
|
||||
if (is_acpi_data_node(fwnode)) {
|
||||
/* All data nodes have parent pointer so just return that */
|
||||
|
@ -791,9 +791,9 @@ static acpi_status acpi_res_consumer_cb(acpi_handle handle, u32 depth,
|
||||
{
|
||||
struct resource *res = context;
|
||||
struct acpi_device **consumer = (struct acpi_device **) ret;
|
||||
struct acpi_device *adev;
|
||||
struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
|
||||
|
||||
if (acpi_bus_get_device(handle, &adev))
|
||||
if (!adev)
|
||||
return AE_OK;
|
||||
|
||||
if (acpi_dev_consumes_res(adev, res)) {
|
||||
|
@ -19,6 +19,7 @@
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/platform_data/x86/apple.h>
|
||||
#include <linux/pgtable.h>
|
||||
#include <linux/crc32.h>
|
||||
|
||||
#include "internal.h"
|
||||
|
||||
@ -135,12 +136,12 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
|
||||
static acpi_status acpi_bus_offline(acpi_handle handle, u32 lvl, void *data,
|
||||
void **ret_p)
|
||||
{
|
||||
struct acpi_device *device = NULL;
|
||||
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
|
||||
struct acpi_device_physical_node *pn;
|
||||
bool second_pass = (bool)data;
|
||||
acpi_status status = AE_OK;
|
||||
|
||||
if (acpi_bus_get_device(handle, &device))
|
||||
if (!device)
|
||||
return AE_OK;
|
||||
|
||||
if (device->handler && !device->handler->hotplug.enabled) {
|
||||
@ -180,10 +181,10 @@ static acpi_status acpi_bus_offline(acpi_handle handle, u32 lvl, void *data,
|
||||
static acpi_status acpi_bus_online(acpi_handle handle, u32 lvl, void *data,
|
||||
void **ret_p)
|
||||
{
|
||||
struct acpi_device *device = NULL;
|
||||
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
|
||||
struct acpi_device_physical_node *pn;
|
||||
|
||||
if (acpi_bus_get_device(handle, &device))
|
||||
if (!device)
|
||||
return AE_OK;
|
||||
|
||||
mutex_lock(&device->physical_node_lock);
|
||||
@ -599,6 +600,19 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
|
||||
}
|
||||
EXPORT_SYMBOL(acpi_bus_get_device);
|
||||
|
||||
/**
|
||||
* acpi_fetch_acpi_dev - Retrieve ACPI device object.
|
||||
* @handle: ACPI handle associated with the requested ACPI device object.
|
||||
*
|
||||
* Return a pointer to the ACPI device object associated with @handle, if
|
||||
* present, or NULL otherwise.
|
||||
*/
|
||||
struct acpi_device *acpi_fetch_acpi_dev(acpi_handle handle)
|
||||
{
|
||||
return handle_to_device(handle, NULL);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_fetch_acpi_dev);
|
||||
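The new helper replaces the status-plus-output-pointer convention of acpi_bus_get_device() with a plain pointer return, which is what most of the conversions in this commit rely on. A minimal sketch of the two calling styles, for illustration only (the caller below is hypothetical):

static void example_lookup(acpi_handle handle)
{
	struct acpi_device *adev;

	/* Old style: a nonzero return means no device for this handle. */
	if (acpi_bus_get_device(handle, &adev))
		return;

	/* New style: NULL means no device for this handle. */
	adev = acpi_fetch_acpi_dev(handle);
	if (!adev)
		return;
}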
|
||||
static void get_acpi_device(void *dev)
|
||||
{
|
||||
acpi_dev_get(dev);
|
||||
@ -608,6 +622,7 @@ struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle)
|
||||
{
|
||||
return handle_to_device(handle, get_acpi_device);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_bus_get_acpi_device);
|
||||
|
||||
static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id)
|
||||
{
|
||||
@ -653,6 +668,19 @@ static int acpi_tie_acpi_dev(struct acpi_device *adev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void acpi_store_pld_crc(struct acpi_device *adev)
|
||||
{
|
||||
struct acpi_pld_info *pld;
|
||||
acpi_status status;
|
||||
|
||||
status = acpi_get_physical_device_location(adev->handle, &pld);
|
||||
if (ACPI_FAILURE(status))
|
||||
return;
|
||||
|
||||
adev->pld_crc = crc32(~0, pld, sizeof(*pld));
|
||||
ACPI_FREE(pld);
|
||||
}
|
||||
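Caching a CRC32 of the raw _PLD buffer at device-add time lets later consumers compare physical locations without re-evaluating _PLD. Assuming the pld_crc field introduced by this change, a comparison could look like the following hypothetical helper:

static bool example_same_physical_location(struct acpi_device *a,
					   struct acpi_device *b)
{
	/* pld_crc stays 0 when _PLD is absent or fails to evaluate. */
	if (!a->pld_crc || !b->pld_crc)
		return false;

	return a->pld_crc == b->pld_crc;
}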
|
||||
static int __acpi_device_add(struct acpi_device *device,
|
||||
void (*release)(struct device *))
|
||||
{
|
||||
@ -711,6 +739,8 @@ static int __acpi_device_add(struct acpi_device *device,
|
||||
if (device->wakeup.flags.valid)
|
||||
list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list);
|
||||
|
||||
acpi_store_pld_crc(device);
|
||||
|
||||
mutex_unlock(&acpi_device_lock);
|
||||
|
||||
if (device->parent)
|
||||
@ -796,9 +826,15 @@ static const char * const acpi_ignore_dep_ids[] = {
|
||||
NULL
|
||||
};
|
||||
|
||||
/* List of HIDs for which we honor deps of matching ACPI devs, when checking _DEP lists. */
|
||||
static const char * const acpi_honor_dep_ids[] = {
|
||||
"INT3472", /* Camera sensor PMIC / clk and regulator info */
|
||||
NULL
|
||||
};
|
||||
|
||||
static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
|
||||
{
|
||||
struct acpi_device *device = NULL;
|
||||
struct acpi_device *device;
|
||||
acpi_status status;
|
||||
|
||||
/*
|
||||
@ -813,7 +849,9 @@ static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
|
||||
status = acpi_get_parent(handle, &handle);
|
||||
if (ACPI_FAILURE(status))
|
||||
return status == AE_NULL_ENTRY ? NULL : acpi_root;
|
||||
} while (acpi_bus_get_device(handle, &device));
|
||||
|
||||
device = acpi_fetch_acpi_dev(handle);
|
||||
} while (!device);
|
||||
return device;
|
||||
}
|
||||
|
||||
@ -1016,6 +1054,7 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state)
|
||||
|
||||
static void acpi_bus_get_power_flags(struct acpi_device *device)
|
||||
{
|
||||
unsigned long long dsc = ACPI_STATE_D0;
|
||||
u32 i;
|
||||
|
||||
/* Presence of _PS0|_PR0 indicates 'power manageable' */
|
||||
@ -1037,6 +1076,9 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
|
||||
if (acpi_has_method(device->handle, "_DSW"))
|
||||
device->power.flags.dsw_present = 1;
|
||||
|
||||
acpi_evaluate_integer(device->handle, "_DSC", NULL, &dsc);
|
||||
device->power.state_for_enumeration = dsc;
|
||||
|
||||
/*
|
||||
* Enumerate supported power management states
|
||||
*/
|
||||
@ -1764,8 +1806,12 @@ static void acpi_scan_dep_init(struct acpi_device *adev)
|
||||
struct acpi_dep_data *dep;
|
||||
|
||||
list_for_each_entry(dep, &acpi_dep_list, node) {
|
||||
if (dep->consumer == adev->handle)
|
||||
if (dep->consumer == adev->handle) {
|
||||
if (dep->honor_dep)
|
||||
adev->flags.honor_deps = 1;
|
||||
|
||||
adev->dep_unmet++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -1969,7 +2015,7 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep)
|
||||
for (count = 0, i = 0; i < dep_devices.count; i++) {
|
||||
struct acpi_device_info *info;
|
||||
struct acpi_dep_data *dep;
|
||||
bool skip;
|
||||
bool skip, honor_dep;
|
||||
|
||||
status = acpi_get_object_info(dep_devices.handles[i], &info);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
@ -1978,6 +2024,7 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep)
|
||||
}
|
||||
|
||||
skip = acpi_info_matches_ids(info, acpi_ignore_dep_ids);
|
||||
honor_dep = acpi_info_matches_ids(info, acpi_honor_dep_ids);
|
||||
kfree(info);
|
||||
|
||||
if (skip)
|
||||
@ -1991,6 +2038,7 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep)
|
||||
|
||||
dep->supplier = dep_devices.handles[i];
|
||||
dep->consumer = handle;
|
||||
dep->honor_dep = honor_dep;
|
||||
|
||||
mutex_lock(&acpi_dep_list_lock);
|
||||
list_add_tail(&dep->node , &acpi_dep_list);
|
||||
@ -2005,11 +2053,10 @@ static bool acpi_bus_scan_second_pass;
|
||||
static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
|
||||
struct acpi_device **adev_p)
|
||||
{
|
||||
struct acpi_device *device = NULL;
|
||||
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
|
||||
acpi_object_type acpi_type;
|
||||
int type;
|
||||
|
||||
acpi_bus_get_device(handle, &device);
|
||||
if (device)
|
||||
goto out;
|
||||
|
||||
@ -2157,8 +2204,8 @@ static void acpi_bus_attach(struct acpi_device *device, bool first_pass)
|
||||
register_dock_dependent_device(device, ejd);
|
||||
|
||||
acpi_bus_get_status(device);
|
||||
/* Skip devices that are not present. */
|
||||
if (!acpi_device_is_present(device)) {
|
||||
/* Skip devices that are not ready for enumeration (e.g. not present) */
|
||||
if (!acpi_dev_ready_for_enumeration(device)) {
|
||||
device->flags.initialized = false;
|
||||
acpi_device_clear_enumerated(device);
|
||||
device->flags.power_manageable = 0;
|
||||
@ -2320,6 +2367,23 @@ void acpi_dev_clear_dependencies(struct acpi_device *supplier)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_dev_clear_dependencies);
|
||||
|
||||
/**
|
||||
* acpi_dev_ready_for_enumeration - Check if the ACPI device is ready for enumeration
|
||||
* @device: Pointer to the &struct acpi_device to check
|
||||
*
|
||||
* Check if the device is present and has no unmet dependencies.
|
||||
*
|
||||
* Return true if the device is ready for enumeration. Otherwise, return false.
|
||||
*/
|
||||
bool acpi_dev_ready_for_enumeration(const struct acpi_device *device)
|
||||
{
|
||||
if (device->flags.honor_deps && device->dep_unmet)
|
||||
return false;
|
||||
|
||||
return acpi_device_is_present(device);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_dev_ready_for_enumeration);
|
||||
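The predicate folds the new honor_deps/dep_unmet bookkeeping into the usual presence check, so enumeration paths need only one test. A hedged sketch of the intended call-site shape, mirroring the acpi_bus_attach() hunk above (the function below is illustrative, not part of the commit):

static void example_attach(struct acpi_device *device)
{
	if (!acpi_dev_ready_for_enumeration(device)) {
		/* Either not present, or it still has unmet _DEP
		 * suppliers and asked us to honor them. */
		device->flags.initialized = false;
		return;
	}

	/* ... proceed with normal enumeration ... */
}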
|
||||
/**
|
||||
* acpi_dev_get_first_consumer_dev - Return ACPI device dependent on @supplier
|
||||
* @supplier: Pointer to the dependee device
|
||||
@ -2438,42 +2502,33 @@ int acpi_bus_register_early_device(int type)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_bus_register_early_device);
|
||||
|
||||
static int acpi_bus_scan_fixed(void)
|
||||
static void acpi_bus_scan_fixed(void)
|
||||
{
|
||||
int result = 0;
|
||||
|
||||
/*
|
||||
* Enumerate all fixed-feature devices.
|
||||
*/
|
||||
if (!(acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON)) {
|
||||
struct acpi_device *device = NULL;
|
||||
struct acpi_device *adev = NULL;
|
||||
|
||||
result = acpi_add_single_object(&device, NULL,
|
||||
ACPI_BUS_TYPE_POWER_BUTTON, false);
|
||||
if (result)
|
||||
return result;
|
||||
|
||||
device->flags.match_driver = true;
|
||||
result = device_attach(&device->dev);
|
||||
if (result < 0)
|
||||
return result;
|
||||
|
||||
device_init_wakeup(&device->dev, true);
|
||||
acpi_add_single_object(&adev, NULL, ACPI_BUS_TYPE_POWER_BUTTON,
|
||||
false);
|
||||
if (adev) {
|
||||
adev->flags.match_driver = true;
|
||||
if (device_attach(&adev->dev) >= 0)
|
||||
device_init_wakeup(&adev->dev, true);
|
||||
else
|
||||
dev_dbg(&adev->dev, "No driver\n");
|
||||
}
|
||||
}
|
||||
|
||||
if (!(acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON)) {
|
||||
struct acpi_device *device = NULL;
|
||||
struct acpi_device *adev = NULL;
|
||||
|
||||
result = acpi_add_single_object(&device, NULL,
|
||||
ACPI_BUS_TYPE_SLEEP_BUTTON, false);
|
||||
if (result)
|
||||
return result;
|
||||
|
||||
device->flags.match_driver = true;
|
||||
result = device_attach(&device->dev);
|
||||
acpi_add_single_object(&adev, NULL, ACPI_BUS_TYPE_SLEEP_BUTTON,
|
||||
false);
|
||||
if (adev) {
|
||||
adev->flags.match_driver = true;
|
||||
if (device_attach(&adev->dev) < 0)
|
||||
dev_dbg(&adev->dev, "No driver\n");
|
||||
}
|
||||
}
|
||||
|
||||
return result < 0 ? result : 0;
|
||||
}
|
||||
|
||||
static void __init acpi_get_spcr_uart_addr(void)
|
||||
@ -2494,9 +2549,8 @@ static void __init acpi_get_spcr_uart_addr(void)
|
||||
|
||||
static bool acpi_scan_initialized;
|
||||
|
||||
int __init acpi_scan_init(void)
|
||||
void __init acpi_scan_init(void)
|
||||
{
|
||||
int result;
|
||||
acpi_status status;
|
||||
struct acpi_table_stao *stao_ptr;
|
||||
|
||||
@ -2546,33 +2600,23 @@ int __init acpi_scan_init(void)
|
||||
/*
|
||||
* Enumerate devices in the ACPI namespace.
|
||||
*/
|
||||
result = acpi_bus_scan(ACPI_ROOT_OBJECT);
|
||||
if (result)
|
||||
goto out;
|
||||
if (acpi_bus_scan(ACPI_ROOT_OBJECT))
|
||||
goto unlock;
|
||||
|
||||
result = acpi_bus_get_device(ACPI_ROOT_OBJECT, &acpi_root);
|
||||
if (result)
|
||||
goto out;
|
||||
acpi_root = acpi_fetch_acpi_dev(ACPI_ROOT_OBJECT);
|
||||
if (!acpi_root)
|
||||
goto unlock;
|
||||
|
||||
/* Fixed feature devices do not exist on HW-reduced platform */
|
||||
if (!acpi_gbl_reduced_hardware) {
|
||||
result = acpi_bus_scan_fixed();
|
||||
if (result) {
|
||||
acpi_detach_data(acpi_root->handle,
|
||||
acpi_scan_drop_device);
|
||||
acpi_device_del(acpi_root);
|
||||
acpi_bus_put_acpi_device(acpi_root);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
if (!acpi_gbl_reduced_hardware)
|
||||
acpi_bus_scan_fixed();
|
||||
|
||||
acpi_turn_off_unused_power_resources();
|
||||
|
||||
acpi_scan_initialized = true;
|
||||
|
||||
out:
|
||||
unlock:
|
||||
mutex_unlock(&acpi_scan_lock);
|
||||
return result;
|
||||
}
|
||||
|
||||
static struct acpi_probe_entry *ape;
|
||||
|
@ -73,7 +73,6 @@ static int acpi_sleep_prepare(u32 acpi_state)
|
||||
acpi_set_waking_vector(acpi_wakeup_address);
|
||||
|
||||
}
|
||||
ACPI_FLUSH_CPU_CACHE();
|
||||
#endif
|
||||
pr_info("Preparing to enter system sleep state S%d\n", acpi_state);
|
||||
acpi_enable_wakeup_devices(acpi_state);
|
||||
@ -566,8 +565,6 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
|
||||
u32 acpi_state = acpi_target_sleep_state;
|
||||
int error;
|
||||
|
||||
ACPI_FLUSH_CPU_CACHE();
|
||||
|
||||
trace_suspend_resume(TPS("acpi_suspend"), acpi_state, true);
|
||||
switch (acpi_state) {
|
||||
case ACPI_STATE_S1:
|
||||
@ -739,21 +736,15 @@ bool acpi_s2idle_wake(void)
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Check non-EC GPE wakeups and dispatch the EC GPE. */
|
||||
/*
|
||||
* Check non-EC GPE wakeups and if there are none, cancel the
|
||||
* SCI-related wakeup and dispatch the EC GPE.
|
||||
*/
|
||||
if (acpi_ec_dispatch_gpe()) {
|
||||
pm_pr_dbg("ACPI non-EC GPE wakeup\n");
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Cancel the SCI wakeup and process all pending events in case
|
||||
* there are any wakeup ones in there.
|
||||
*
|
||||
* Note that if any non-EC GPEs are active at this point, the
|
||||
* SCI will retrigger after the rearming below, so no events
|
||||
* should be missed by canceling the wakeup here.
|
||||
*/
|
||||
pm_system_cancel_wakeup();
|
||||
acpi_os_wait_events_complete();
|
||||
|
||||
/*
|
||||
@ -816,14 +807,18 @@ void __weak acpi_s2idle_setup(void)
|
||||
|
||||
static void acpi_sleep_suspend_setup(void)
|
||||
{
|
||||
bool suspend_ops_needed = false;
|
||||
int i;
|
||||
|
||||
for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
|
||||
if (acpi_sleep_state_supported(i))
|
||||
if (acpi_sleep_state_supported(i)) {
|
||||
sleep_states[i] = 1;
|
||||
suspend_ops_needed = true;
|
||||
}
|
||||
|
||||
suspend_set_ops(old_suspend_ordering ?
|
||||
&acpi_suspend_ops_old : &acpi_suspend_ops);
|
||||
if (suspend_ops_needed)
|
||||
suspend_set_ops(old_suspend_ordering ?
|
||||
&acpi_suspend_ops_old : &acpi_suspend_ops);
|
||||
|
||||
acpi_s2idle_setup();
|
||||
}
|
||||
@ -874,11 +869,11 @@ static inline void acpi_sleep_syscore_init(void) {}
|
||||
#ifdef CONFIG_HIBERNATION
|
||||
static unsigned long s4_hardware_signature;
|
||||
static struct acpi_table_facs *facs;
|
||||
static bool nosigcheck;
|
||||
static int sigcheck = -1; /* Default behaviour is just to warn */
|
||||
|
||||
void __init acpi_no_s4_hw_signature(void)
|
||||
void __init acpi_check_s4_hw_signature(int check)
|
||||
{
|
||||
nosigcheck = true;
|
||||
sigcheck = check;
|
||||
}
|
||||
|
||||
static int acpi_hibernation_begin(pm_message_t stage)
|
||||
@ -900,8 +895,6 @@ static int acpi_hibernation_enter(void)
|
||||
{
|
||||
acpi_status status = AE_OK;
|
||||
|
||||
ACPI_FLUSH_CPU_CACHE();
|
||||
|
||||
/* This shouldn't return. If it returns, we have a problem */
|
||||
status = acpi_enter_sleep_state(ACPI_STATE_S4);
|
||||
/* Reprogram control registers */
|
||||
@ -1006,12 +999,28 @@ static void acpi_sleep_hibernate_setup(void)
|
||||
hibernation_set_ops(old_suspend_ordering ?
|
||||
&acpi_hibernation_ops_old : &acpi_hibernation_ops);
|
||||
sleep_states[ACPI_STATE_S4] = 1;
|
||||
if (nosigcheck)
|
||||
if (!sigcheck)
|
||||
return;
|
||||
|
||||
acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
|
||||
if (facs)
|
||||
if (facs) {
|
||||
/*
|
||||
* s4_hardware_signature is the local variable which is just
|
||||
* used to warn about mismatch after we're attempting to
|
||||
* resume (in violation of the ACPI specification.)
|
||||
*/
|
||||
s4_hardware_signature = facs->hardware_signature;
|
||||
|
||||
if (sigcheck > 0) {
|
||||
/*
|
||||
* If we're actually obeying the ACPI specification
|
||||
* then the signature is written out as part of the
|
||||
* swsusp header, in order to allow the boot kernel
|
||||
* to gracefully decline to resume.
|
||||
*/
|
||||
swsusp_hardware_signature = facs->hardware_signature;
|
||||
}
|
||||
}
|
||||
}
|
||||
#else /* !CONFIG_HIBERNATION */
|
||||
static inline void acpi_sleep_hibernate_setup(void) {}
|
||||
|
@ -107,8 +107,13 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
|
||||
pr_info("SPCR table version %d\n", table->header.revision);
|
||||
|
||||
if (table->serial_port.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
|
||||
switch (ACPI_ACCESS_BIT_WIDTH((
|
||||
table->serial_port.access_width))) {
|
||||
u32 bit_width = table->serial_port.access_width;
|
||||
|
||||
if (bit_width > ACPI_ACCESS_BIT_MAX) {
|
||||
pr_err("Unacceptable wide SPCR Access Width. Defaulting to byte size\n");
|
||||
bit_width = ACPI_ACCESS_BIT_DEFAULT;
|
||||
}
|
||||
switch (ACPI_ACCESS_BIT_WIDTH((bit_width))) {
|
||||
default:
|
||||
pr_err("Unexpected SPCR Access Width. Defaulting to byte size\n");
|
||||
fallthrough;
|
||||
|
@ -939,10 +939,11 @@ static struct attribute *hotplug_profile_attrs[] = {
|
||||
&hotplug_enabled_attr.attr,
|
||||
NULL
|
||||
};
|
||||
ATTRIBUTE_GROUPS(hotplug_profile);
|
||||
|
||||
static struct kobj_type acpi_hotplug_profile_ktype = {
|
||||
.sysfs_ops = &kobj_sysfs_ops,
|
||||
.default_attrs = hotplug_profile_attrs,
|
||||
.default_groups = hotplug_profile_groups,
|
||||
};
|
||||
|
||||
void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
|
||||
|
@ -35,12 +35,13 @@ static char *mps_inti_flags_trigger[] = { "dfl", "edge", "res", "level" };
|
||||
|
||||
static struct acpi_table_desc initial_tables[ACPI_MAX_TABLES] __initdata;
|
||||
|
||||
static int acpi_apic_instance __initdata;
|
||||
static int acpi_apic_instance __initdata_or_acpilib;
|
||||
|
||||
enum acpi_subtable_type {
|
||||
ACPI_SUBTABLE_COMMON,
|
||||
ACPI_SUBTABLE_HMAT,
|
||||
ACPI_SUBTABLE_PRMT,
|
||||
ACPI_SUBTABLE_CEDT,
|
||||
};
|
||||
|
||||
struct acpi_subtable_entry {
|
||||
@ -52,7 +53,7 @@ struct acpi_subtable_entry {
|
||||
* Disable table checksum verification for the early stage due to the size
|
||||
* limitation of the current x86 early mapping implementation.
|
||||
*/
|
||||
static bool acpi_verify_table_checksum __initdata = false;
|
||||
static bool acpi_verify_table_checksum __initdata_or_acpilib = false;
|
||||
|
||||
void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
|
||||
{
|
||||
@ -216,7 +217,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
|
||||
}
|
||||
}
|
||||
|
||||
static unsigned long __init
|
||||
static unsigned long __init_or_acpilib
|
||||
acpi_get_entry_type(struct acpi_subtable_entry *entry)
|
||||
{
|
||||
switch (entry->type) {
|
||||
@ -226,11 +227,13 @@ acpi_get_entry_type(struct acpi_subtable_entry *entry)
|
||||
return entry->hdr->hmat.type;
|
||||
case ACPI_SUBTABLE_PRMT:
|
||||
return 0;
|
||||
case ACPI_SUBTABLE_CEDT:
|
||||
return entry->hdr->cedt.type;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned long __init
|
||||
static unsigned long __init_or_acpilib
|
||||
acpi_get_entry_length(struct acpi_subtable_entry *entry)
|
||||
{
|
||||
switch (entry->type) {
|
||||
@ -240,11 +243,13 @@ acpi_get_entry_length(struct acpi_subtable_entry *entry)
|
||||
return entry->hdr->hmat.length;
|
||||
case ACPI_SUBTABLE_PRMT:
|
||||
return entry->hdr->prmt.length;
|
||||
case ACPI_SUBTABLE_CEDT:
|
||||
return entry->hdr->cedt.length;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned long __init
|
||||
static unsigned long __init_or_acpilib
|
||||
acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
|
||||
{
|
||||
switch (entry->type) {
|
||||
@ -254,20 +259,40 @@ acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
|
||||
return sizeof(entry->hdr->hmat);
|
||||
case ACPI_SUBTABLE_PRMT:
|
||||
return sizeof(entry->hdr->prmt);
|
||||
case ACPI_SUBTABLE_CEDT:
|
||||
return sizeof(entry->hdr->cedt);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static enum acpi_subtable_type __init
|
||||
static enum acpi_subtable_type __init_or_acpilib
|
||||
acpi_get_subtable_type(char *id)
|
||||
{
|
||||
if (strncmp(id, ACPI_SIG_HMAT, 4) == 0)
|
||||
return ACPI_SUBTABLE_HMAT;
|
||||
if (strncmp(id, ACPI_SIG_PRMT, 4) == 0)
|
||||
return ACPI_SUBTABLE_PRMT;
|
||||
if (strncmp(id, ACPI_SIG_CEDT, 4) == 0)
|
||||
return ACPI_SUBTABLE_CEDT;
|
||||
return ACPI_SUBTABLE_COMMON;
|
||||
}
|
||||
|
||||
static __init_or_acpilib bool has_handler(struct acpi_subtable_proc *proc)
|
||||
{
|
||||
return proc->handler || proc->handler_arg;
|
||||
}
|
||||
|
||||
static __init_or_acpilib int call_handler(struct acpi_subtable_proc *proc,
|
||||
union acpi_subtable_headers *hdr,
|
||||
unsigned long end)
|
||||
{
|
||||
if (proc->handler)
|
||||
return proc->handler(hdr, end);
|
||||
if (proc->handler_arg)
|
||||
return proc->handler_arg(hdr, proc->arg, end);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/**
|
||||
* acpi_parse_entries_array - for each proc_num find a suitable subtable
|
||||
*
|
||||
@ -291,10 +316,10 @@ acpi_get_subtable_type(char *id)
|
||||
* On success returns sum of all matching entries for all proc handlers.
|
||||
* Otherwise, -ENODEV or -EINVAL is returned.
|
||||
*/
|
||||
static int __init acpi_parse_entries_array(char *id, unsigned long table_size,
|
||||
struct acpi_table_header *table_header,
|
||||
struct acpi_subtable_proc *proc, int proc_num,
|
||||
unsigned int max_entries)
|
||||
static int __init_or_acpilib acpi_parse_entries_array(
|
||||
char *id, unsigned long table_size,
|
||||
struct acpi_table_header *table_header, struct acpi_subtable_proc *proc,
|
||||
int proc_num, unsigned int max_entries)
|
||||
{
|
||||
struct acpi_subtable_entry entry;
|
||||
unsigned long table_end, subtable_len, entry_len;
|
||||
@ -318,8 +343,9 @@ static int __init acpi_parse_entries_array(char *id, unsigned long table_size,
|
||||
for (i = 0; i < proc_num; i++) {
|
||||
if (acpi_get_entry_type(&entry) != proc[i].id)
|
||||
continue;
|
||||
if (!proc[i].handler ||
|
||||
(!errs && proc[i].handler(entry.hdr, table_end))) {
|
||||
if (!has_handler(&proc[i]) ||
|
||||
(!errs &&
|
||||
call_handler(&proc[i], entry.hdr, table_end))) {
|
||||
errs++;
|
||||
continue;
|
||||
}
|
||||
@ -352,10 +378,9 @@ static int __init acpi_parse_entries_array(char *id, unsigned long table_size,
|
||||
return errs ? -EINVAL : count;
|
||||
}
|
||||
|
||||
int __init acpi_table_parse_entries_array(char *id,
|
||||
unsigned long table_size,
|
||||
struct acpi_subtable_proc *proc, int proc_num,
|
||||
unsigned int max_entries)
|
||||
int __init_or_acpilib acpi_table_parse_entries_array(
|
||||
char *id, unsigned long table_size, struct acpi_subtable_proc *proc,
|
||||
int proc_num, unsigned int max_entries)
|
||||
{
|
||||
struct acpi_table_header *table_header = NULL;
|
||||
int count;
|
||||
@ -375,7 +400,7 @@ int __init acpi_table_parse_entries_array(char *id,
|
||||
|
||||
acpi_get_table(id, instance, &table_header);
|
||||
if (!table_header) {
|
||||
pr_warn("%4.4s not present\n", id);
|
||||
pr_debug("%4.4s not present\n", id);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
@ -386,21 +411,41 @@ int __init acpi_table_parse_entries_array(char *id,
|
||||
return count;
|
||||
}
|
||||
|
||||
int __init acpi_table_parse_entries(char *id,
|
||||
unsigned long table_size,
|
||||
int entry_id,
|
||||
acpi_tbl_entry_handler handler,
|
||||
unsigned int max_entries)
|
||||
static int __init_or_acpilib __acpi_table_parse_entries(
|
||||
char *id, unsigned long table_size, int entry_id,
|
||||
acpi_tbl_entry_handler handler, acpi_tbl_entry_handler_arg handler_arg,
|
||||
void *arg, unsigned int max_entries)
|
||||
{
|
||||
struct acpi_subtable_proc proc = {
|
||||
.id = entry_id,
|
||||
.handler = handler,
|
||||
.handler_arg = handler_arg,
|
||||
.arg = arg,
|
||||
};
|
||||
|
||||
return acpi_table_parse_entries_array(id, table_size, &proc, 1,
|
||||
max_entries);
|
||||
}
|
||||
|
||||
int __init_or_acpilib
|
||||
acpi_table_parse_cedt(enum acpi_cedt_type id,
|
||||
acpi_tbl_entry_handler_arg handler_arg, void *arg)
|
||||
{
|
||||
return __acpi_table_parse_entries(ACPI_SIG_CEDT,
|
||||
sizeof(struct acpi_table_cedt), id,
|
||||
NULL, handler_arg, arg, 0);
|
||||
}
|
||||
EXPORT_SYMBOL_ACPI_LIB(acpi_table_parse_cedt);
|
||||
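acpi_table_parse_cedt() is the first user of the handler_arg/arg plumbing added above: the callback receives a caller-supplied pointer alongside each matching subtable. A hedged usage sketch, assuming the CEDT CHBS subtable type from the ACPICA headers (the callback and caller below are illustrative):

static int example_count_chbs(union acpi_subtable_headers *header,
			      void *arg, const unsigned long end)
{
	int *count = arg;

	(*count)++;	/* one CHBS entry seen */
	return 0;
}

static void example_parse_cedt(void)
{
	int nr_chbs = 0;

	/* A negative return means the CEDT is absent or malformed. */
	if (acpi_table_parse_cedt(ACPI_CEDT_TYPE_CHBS,
				  example_count_chbs, &nr_chbs) < 0)
		return;
}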
|
||||
int __init acpi_table_parse_entries(char *id, unsigned long table_size,
|
||||
int entry_id,
|
||||
acpi_tbl_entry_handler handler,
|
||||
unsigned int max_entries)
|
||||
{
|
||||
return __acpi_table_parse_entries(id, table_size, entry_id, handler,
|
||||
NULL, NULL, max_entries);
|
||||
}
|
||||
|
||||
int __init acpi_table_parse_madt(enum acpi_madt_type id,
|
||||
acpi_tbl_entry_handler handler, unsigned int max_entries)
|
||||
{
|
||||
@ -500,7 +545,7 @@ static const char table_sigs[][ACPI_NAMESEG_SIZE] __initconst = {
|
||||
ACPI_SIG_WDDT, ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT,
|
||||
ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT,
|
||||
ACPI_SIG_IORT, ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT,
|
||||
ACPI_SIG_NHLT };
|
||||
ACPI_SIG_NHLT, ACPI_SIG_AEST };
|
||||
|
||||
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
|
||||
|
||||
@ -723,7 +768,7 @@ static void __init acpi_table_initrd_scan(void)
|
||||
/*
|
||||
* Mark the table to avoid being used in
|
||||
* acpi_table_initrd_override(). Though this is not possible
|
||||
* because override is disabled in acpi_install_table().
|
||||
* because override is disabled in acpi_install_physical_table().
|
||||
*/
|
||||
if (test_and_set_bit(table_index, acpi_initrd_installed)) {
|
||||
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
|
||||
@ -734,7 +779,7 @@ static void __init acpi_table_initrd_scan(void)
|
||||
table->signature, table->oem_id,
|
||||
table->oem_table_id);
|
||||
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
|
||||
acpi_install_table(acpi_tables_addr + table_offset, TRUE);
|
||||
acpi_install_physical_table(acpi_tables_addr + table_offset);
|
||||
next_table:
|
||||
table_offset += table_length;
|
||||
table_index++;
|
||||
|
@ -697,7 +697,6 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
|
||||
struct acpi_device *device = cdev->devdata;
|
||||
struct acpi_thermal *tz = thermal->devdata;
|
||||
struct acpi_device *dev;
|
||||
acpi_status status;
|
||||
acpi_handle handle;
|
||||
int i;
|
||||
int j;
|
||||
@ -715,8 +714,8 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
|
||||
for (i = 0; i < tz->trips.passive.devices.count;
|
||||
i++) {
|
||||
handle = tz->trips.passive.devices.handles[i];
|
||||
status = acpi_bus_get_device(handle, &dev);
|
||||
if (ACPI_FAILURE(status) || dev != device)
|
||||
dev = acpi_fetch_acpi_dev(handle);
|
||||
if (dev != device)
|
||||
continue;
|
||||
if (bind)
|
||||
result =
|
||||
@ -741,8 +740,8 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
|
||||
j < tz->trips.active[i].devices.count;
|
||||
j++) {
|
||||
handle = tz->trips.active[i].devices.handles[j];
|
||||
status = acpi_bus_get_device(handle, &dev);
|
||||
if (ACPI_FAILURE(status) || dev != device)
|
||||
dev = acpi_fetch_acpi_dev(handle);
|
||||
if (dev != device)
|
||||
continue;
|
||||
if (bind)
|
||||
result = thermal_zone_bind_cooling_device
|
||||
@ -1098,8 +1097,6 @@ static int acpi_thermal_resume(struct device *dev)
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
|
||||
if (!(&tz->trips.active[i]))
|
||||
break;
|
||||
if (!tz->trips.active[i].flags.valid)
|
||||
break;
|
||||
tz->trips.active[i].flags.enabled = 1;
|
||||
|
@ -59,18 +59,16 @@ static void acpi_video_parse_cmdline(void)
|
||||
static acpi_status
|
||||
find_video(acpi_handle handle, u32 lvl, void *context, void **rv)
|
||||
{
|
||||
struct acpi_device *acpi_dev = acpi_fetch_acpi_dev(handle);
|
||||
long *cap = context;
|
||||
struct pci_dev *dev;
|
||||
struct acpi_device *acpi_dev;
|
||||
|
||||
static const struct acpi_device_id video_ids[] = {
|
||||
{ACPI_VIDEO_HID, 0},
|
||||
{"", 0},
|
||||
};
|
||||
if (acpi_bus_get_device(handle, &acpi_dev))
|
||||
return AE_OK;
|
||||
|
||||
if (!acpi_match_device_ids(acpi_dev, video_ids)) {
|
||||
if (acpi_dev && !acpi_match_device_ids(acpi_dev, video_ids)) {
|
||||
dev = acpi_get_pci_dev(handle);
|
||||
if (!dev)
|
||||
return AE_OK;
|
||||
@ -115,7 +113,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
*/
|
||||
{
|
||||
.callback = video_detect_force_vendor,
|
||||
.ident = "X360",
|
||||
/* X360 */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "X360"),
|
||||
@ -124,7 +122,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_vendor,
|
||||
.ident = "Asus UL30VT",
|
||||
/* Asus UL30VT */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"),
|
||||
@ -132,7 +130,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_vendor,
|
||||
.ident = "Asus UL30A",
|
||||
/* Asus UL30A */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
|
||||
@ -140,7 +138,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_vendor,
|
||||
.ident = "GIGABYTE GB-BXBT-2807",
|
||||
/* GIGABYTE GB-BXBT-2807 */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "GB-BXBT-2807"),
|
||||
@ -148,12 +146,20 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_vendor,
|
||||
.ident = "Sony VPCEH3U1E",
|
||||
/* Sony VPCEH3U1E */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "VPCEH3U1E"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_vendor,
|
||||
/* Xiaomi Mi Pad 2 */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
|
||||
},
|
||||
},
|
||||
|
||||
/*
|
||||
* These models have a working acpi_video backlight control, and using
|
||||
@ -164,7 +170,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
*/
|
||||
{
|
||||
.callback = video_detect_force_video,
|
||||
.ident = "ThinkPad T420",
|
||||
/* ThinkPad T420 */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
||||
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T420"),
|
||||
@ -172,7 +178,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_video,
|
||||
.ident = "ThinkPad T520",
|
||||
/* ThinkPad T520 */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
||||
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T520"),
|
||||
@ -180,7 +186,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_video,
|
||||
.ident = "ThinkPad X201s",
|
||||
/* ThinkPad X201s */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
||||
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
|
||||
@ -188,7 +194,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_video,
|
||||
.ident = "ThinkPad X201T",
|
||||
/* ThinkPad X201T */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
||||
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201T"),
|
||||
@ -199,7 +205,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
{
|
||||
/* https://bugs.freedesktop.org/show_bug.cgi?id=81515 */
|
||||
.callback = video_detect_force_video,
|
||||
.ident = "HP ENVY 15 Notebook",
|
||||
/* HP ENVY 15 Notebook */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"),
|
||||
@ -207,7 +213,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_video,
|
||||
.ident = "SAMSUNG 870Z5E/880Z5E/680Z5E",
|
||||
/* SAMSUNG 870Z5E/880Z5E/680Z5E */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "870Z5E/880Z5E/680Z5E"),
|
||||
@ -215,7 +221,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_video,
|
||||
.ident = "SAMSUNG 370R4E/370R4V/370R5E/3570RE/370R5V",
|
||||
/* SAMSUNG 370R4E/370R4V/370R5E/3570RE/370R5V */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME,
|
||||
@ -225,7 +231,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
{
|
||||
/* https://bugzilla.redhat.com/show_bug.cgi?id=1186097 */
|
||||
.callback = video_detect_force_video,
|
||||
.ident = "SAMSUNG 3570R/370R/470R/450R/510R/4450RV",
|
||||
/* SAMSUNG 3570R/370R/470R/450R/510R/4450RV */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME,
|
||||
@ -235,7 +241,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
{
|
||||
/* https://bugzilla.redhat.com/show_bug.cgi?id=1557060 */
|
||||
.callback = video_detect_force_video,
|
||||
.ident = "SAMSUNG 670Z5E",
|
||||
/* SAMSUNG 670Z5E */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "670Z5E"),
|
||||
@ -244,7 +250,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
{
|
||||
/* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */
|
||||
.callback = video_detect_force_video,
|
||||
.ident = "SAMSUNG 730U3E/740U3E",
|
||||
/* SAMSUNG 730U3E/740U3E */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "730U3E/740U3E"),
|
||||
@ -253,7 +259,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
{
|
||||
/* https://bugs.freedesktop.org/show_bug.cgi?id=87286 */
|
||||
.callback = video_detect_force_video,
|
||||
.ident = "SAMSUNG 900X3C/900X3D/900X3E/900X4C/900X4D",
|
||||
/* SAMSUNG 900X3C/900X3D/900X3E/900X4C/900X4D */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME,
|
||||
@ -263,7 +269,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
{
|
||||
/* https://bugzilla.redhat.com/show_bug.cgi?id=1272633 */
|
||||
.callback = video_detect_force_video,
|
||||
.ident = "Dell XPS14 L421X",
|
||||
/* Dell XPS14 L421X */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "XPS L421X"),
|
||||
@ -272,7 +278,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
{
|
||||
/* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */
|
||||
.callback = video_detect_force_video,
|
||||
.ident = "Dell XPS15 L521X",
|
||||
/* Dell XPS15 L521X */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"),
|
||||
@ -281,7 +287,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
{
|
||||
/* https://bugzilla.kernel.org/show_bug.cgi?id=108971 */
|
||||
.callback = video_detect_force_video,
|
||||
.ident = "SAMSUNG 530U4E/540U4E",
|
||||
/* SAMSUNG 530U4E/540U4E */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "530U4E/540U4E"),
|
||||
@ -290,7 +296,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
/* https://bugs.launchpad.net/bugs/1894667 */
|
||||
{
|
||||
.callback = video_detect_force_video,
|
||||
.ident = "HP 635 Notebook",
|
||||
/* HP 635 Notebook */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "HP 635 Notebook PC"),
|
||||
@ -301,7 +307,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
{
|
||||
/* https://bugzilla.redhat.com/show_bug.cgi?id=1201530 */
|
||||
.callback = video_detect_force_native,
|
||||
.ident = "Lenovo Ideapad S405",
|
||||
/* Lenovo Ideapad S405 */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "Lenovo IdeaPad S405"),
|
||||
@ -310,7 +316,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
{
|
||||
/* https://bugzilla.redhat.com/show_bug.cgi?id=1187004 */
|
||||
.callback = video_detect_force_native,
|
||||
.ident = "Lenovo Ideapad Z570",
|
||||
/* Lenovo Ideapad Z570 */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "102434U"),
|
||||
@ -318,7 +324,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_native,
|
||||
.ident = "Lenovo E41-25",
|
||||
/* Lenovo E41-25 */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "81FS"),
|
||||
@ -326,7 +332,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_native,
|
||||
.ident = "Lenovo E41-45",
|
||||
/* Lenovo E41-45 */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "82BK"),
|
||||
@ -335,7 +341,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
{
|
||||
/* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */
|
||||
.callback = video_detect_force_native,
|
||||
.ident = "Apple MacBook Pro 12,1",
|
||||
/* Apple MacBook Pro 12,1 */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro12,1"),
|
||||
@ -343,7 +349,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_native,
|
||||
.ident = "Dell Vostro V131",
|
||||
/* Dell Vostro V131 */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
|
||||
@ -352,7 +358,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
{
|
||||
/* https://bugzilla.redhat.com/show_bug.cgi?id=1123661 */
|
||||
.callback = video_detect_force_native,
|
||||
.ident = "Dell XPS 17 L702X",
|
||||
/* Dell XPS 17 L702X */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"),
|
||||
@ -360,7 +366,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_native,
|
||||
.ident = "Dell Precision 7510",
|
||||
/* Dell Precision 7510 */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"),
|
||||
@ -368,7 +374,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_native,
|
||||
.ident = "Acer Aspire 5738z",
|
||||
/* Acer Aspire 5738z */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
|
||||
@ -378,7 +384,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
{
|
||||
/* https://bugzilla.kernel.org/show_bug.cgi?id=207835 */
|
||||
.callback = video_detect_force_native,
|
||||
.ident = "Acer TravelMate 5735Z",
|
||||
/* Acer TravelMate 5735Z */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5735Z"),
|
||||
@ -387,7 +393,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_native,
|
||||
.ident = "ASUSTeK COMPUTER INC. GA401",
|
||||
/* ASUSTeK COMPUTER INC. GA401 */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "GA401"),
|
||||
@ -395,7 +401,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_native,
|
||||
.ident = "ASUSTeK COMPUTER INC. GA502",
|
||||
/* ASUSTeK COMPUTER INC. GA502 */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "GA502"),
|
||||
@ -403,12 +409,87 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_native,
|
||||
.ident = "ASUSTeK COMPUTER INC. GA503",
|
||||
/* ASUSTeK COMPUTER INC. GA503 */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "GA503"),
|
||||
},
|
||||
},
|
||||
/*
|
||||
* Clevo NL5xRU and NL5xNU/TUXEDO Aura 15 Gen1 and Gen2 have both a
|
||||
* working native and video interface. However the default detection
|
||||
* mechanism first registers the video interface before unregistering
|
||||
* it again and switching to the native interface during boot. This
|
||||
* results in a dangling SBIOS request for backlight change for some
|
||||
* reason, causing the backlight to switch to ~2% once per boot on the
|
||||
* first power cord connect or disconnect event. Setting the native
|
||||
* interface explicitly circumvents this buggy behaviour, by avoiding
|
||||
* the unregistering process.
|
||||
*/
|
||||
{
|
||||
.callback = video_detect_force_native,
|
||||
.ident = "Clevo NL5xRU",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_native,
|
||||
.ident = "Clevo NL5xRU",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_native,
|
||||
.ident = "Clevo NL5xRU",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_native,
|
||||
.ident = "Clevo NL5xRU",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_native,
|
||||
.ident = "Clevo NL5xRU",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_native,
|
||||
.ident = "Clevo NL5xNU",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_native,
|
||||
.ident = "Clevo NL5xNU",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_native,
|
||||
.ident = "Clevo NL5xNU",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
|
||||
},
|
||||
},
|
||||
|
||||
/*
|
||||
* Desktops which falsely report a backlight and which our heuristics
|
||||
@ -416,7 +497,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
*/
|
||||
{
|
||||
.callback = video_detect_force_none,
|
||||
.ident = "Dell OptiPlex 9020M",
|
||||
/* Dell OptiPlex 9020M */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 9020M"),
|
||||
@ -424,7 +505,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
|
||||
},
|
||||
{
|
||||
.callback = video_detect_force_none,
|
||||
.ident = "MSI MS-7721",
|
||||
/* MSI MS-7721 */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "MSI"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "MS-7721"),
|
||||
|
@ -293,9 +293,9 @@ static void lpi_check_constraints(void)
|
||||
|
||||
for (i = 0; i < lpi_constraints_table_size; ++i) {
|
||||
acpi_handle handle = lpi_constraints_table[i].handle;
|
||||
struct acpi_device *adev;
|
||||
struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
|
||||
|
||||
if (!handle || acpi_bus_get_device(handle, &adev))
|
||||
if (!adev)
|
||||
continue;
|
||||
|
||||
acpi_handle_debug(handle,
|
||||
|
@ -8,8 +8,11 @@
|
||||
* Copyright (C) 2013-2015 Intel Corporation. All rights reserved.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "ACPI: " fmt
|
||||
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/dmi.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <asm/cpu_device_id.h>
|
||||
#include <asm/intel-family.h>
|
||||
#include "../internal.h"
|
||||
@ -71,6 +74,12 @@ static const struct override_status_id override_status_ids[] = {
|
||||
PRESENT_ENTRY_HID("80860F09", "1", ATOM_SILVERMONT, {}),
|
||||
PRESENT_ENTRY_HID("80862288", "1", ATOM_AIRMONT, {}),
|
||||
|
||||
/* The Xiaomi Mi Pad 2 uses PWM2 for touchkeys backlight control */
|
||||
PRESENT_ENTRY_HID("80862289", "2", ATOM_AIRMONT, {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
|
||||
}),
|
||||
|
||||
/*
|
||||
* The INT0002 device is necessary to clear wakeup interrupt sources
|
||||
* on Cherry Trail devices, without it we get nobody cared IRQ msgs.
|
||||
@ -202,3 +211,183 @@ bool force_storage_d3(void)
|
||||
{
|
||||
return x86_match_cpu(storage_d3_cpu_ids);
|
||||
}
|
||||
|
||||
/*
|
||||
* x86 ACPI boards which ship with only Android as their factory image usually
|
||||
* declare a whole bunch of bogus I2C devices in their ACPI tables and sometimes
|
||||
* there are issues with serdev devices on these boards too, e.g. the resource
|
||||
* points to the wrong serdev_controller.
|
||||
*
|
||||
* Instantiating I2C / serdev devs for these bogus devs causes various issues,
|
||||
* e.g. GPIO/IRQ resource conflicts because sometimes drivers do bind to them.
|
||||
* The Android x86 kernel fork shipped on these devices has some special code
|
||||
* to remove the bogus I2C clients (and AFAICT serdevs are ignored completely).
|
||||
*
|
||||
* The acpi_quirk_skip_*_enumeration() functions below are used by the I2C or
|
||||
* serdev code to skip instantiating any I2C or serdev devs on broken boards.
|
||||
*
|
||||
* In case of I2C an exception is made for HIDs on the i2c_acpi_known_good_ids
|
||||
* list. These are known to always be correct (and in case of the audio-codecs
|
||||
* the drivers heavily rely on the codec being enumerated through ACPI).
|
||||
*
|
||||
* Note these boards typically do actually have I2C and serdev devices,
|
||||
* just different ones than the ones described in their DSDT. The devices
|
||||
* which are actually present are manually instantiated by the
|
||||
* drivers/platform/x86/x86-android-tablets.c kernel module.
|
||||
*/
|
||||
#define ACPI_QUIRK_SKIP_I2C_CLIENTS BIT(0)
|
||||
#define ACPI_QUIRK_UART1_TTY_UART2_SKIP BIT(1)
|
||||
#define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY BIT(2)
|
||||
#define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY BIT(3)
|
||||
|
||||
static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
|
||||
/*
|
||||
* 1. Devices with only the skip / don't-skip AC and battery quirks,
|
||||
* sorted alphabetically.
|
||||
*/
|
||||
{
|
||||
/* ECS EF20EA, AXP288 PMIC but uses separate fuel-gauge */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"),
|
||||
},
|
||||
.driver_data = (void *)ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY
|
||||
},
|
||||
{
|
||||
/* Lenovo Ideapad Miix 320, AXP288 PMIC, separate fuel-gauge */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "80XF"),
|
||||
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
|
||||
},
|
||||
.driver_data = (void *)ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY
|
||||
},
|
||||
|
||||
/*
|
||||
* 2. Devices which also have the skip i2c/serdev quirks and which
|
||||
* need the x86-android-tablets module to properly work.
|
||||
*/
|
||||
#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
|
||||
{
|
||||
.matches = {
|
||||
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
|
||||
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ME176C"),
|
||||
},
|
||||
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
|
||||
ACPI_QUIRK_UART1_TTY_UART2_SKIP |
|
||||
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
|
||||
},
|
||||
{
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
|
||||
},
|
||||
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
|
||||
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
|
||||
},
|
||||
{
|
||||
/* Whitelabel (sold as various brands) TM800A550L */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
|
||||
/* Above strings are too generic, also match on BIOS version */
|
||||
DMI_MATCH(DMI_BIOS_VERSION, "ZY-8-BI-PX4S70VTR400-X423B-005-D"),
|
||||
},
|
||||
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
|
||||
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
|
||||
},
|
||||
#endif
|
||||
{}
|
||||
};
|
||||
|
||||
#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
|
||||
static const struct acpi_device_id i2c_acpi_known_good_ids[] = {
|
||||
{ "10EC5640", 0 }, /* RealTek ALC5640 audio codec */
|
||||
{ "INT33F4", 0 }, /* X-Powers AXP288 PMIC */
|
||||
{ "INT33FD", 0 }, /* Intel Crystal Cove PMIC */
|
||||
{ "NPCE69A", 0 }, /* Asus Transformer keyboard dock */
|
||||
{}
|
||||
};
|
||||
|
||||
bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev)
|
||||
{
|
||||
const struct dmi_system_id *dmi_id;
|
||||
long quirks;
|
||||
|
||||
dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
|
||||
if (!dmi_id)
|
||||
return false;
|
||||
|
||||
quirks = (unsigned long)dmi_id->driver_data;
|
||||
if (!(quirks & ACPI_QUIRK_SKIP_I2C_CLIENTS))
|
||||
return false;
|
||||
|
||||
return acpi_match_device_ids(adev, i2c_acpi_known_good_ids);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_quirk_skip_i2c_client_enumeration);
|
||||
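The helper is meant to be called from the I2C core while it walks ACPI-enumerated devices; it returns true only on a DMI-matched board and only for devices that are not on the known-good HID list above. A hypothetical call-site sketch:

static bool example_should_create_i2c_client(struct acpi_device *adev)
{
	/* Skip the bogus DSDT-declared clients on Android x86 boards. */
	if (acpi_quirk_skip_i2c_client_enumeration(adev))
		return false;

	return true;
}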
|
||||
int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
|
||||
{
|
||||
struct acpi_device *adev = ACPI_COMPANION(controller_parent);
|
||||
const struct dmi_system_id *dmi_id;
|
||||
long quirks = 0;
|
||||
|
||||
*skip = false;
|
||||
|
||||
/* !dev_is_platform() to not match on PNP enumerated debug UARTs */
|
||||
if (!adev || !adev->pnp.unique_id || !dev_is_platform(controller_parent))
|
||||
return 0;
|
||||
|
||||
dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
|
||||
if (dmi_id)
|
||||
quirks = (unsigned long)dmi_id->driver_data;
|
||||
|
||||
if (quirks & ACPI_QUIRK_UART1_TTY_UART2_SKIP) {
|
||||
if (!strcmp(adev->pnp.unique_id, "1"))
|
||||
return -ENODEV; /* Create tty cdev instead of serdev */
|
||||
|
||||
if (!strcmp(adev->pnp.unique_id, "2"))
|
||||
*skip = true;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_quirk_skip_serdev_enumeration);
|
||||
#endif
|
||||
|
||||
/* Lists of PMIC ACPI HIDs with an (often better) native charger driver */
|
||||
static const struct {
|
||||
const char *hid;
|
||||
int hrv;
|
||||
} acpi_skip_ac_and_battery_pmic_ids[] = {
|
||||
{ "INT33F4", -1 }, /* X-Powers AXP288 PMIC */
|
||||
{ "INT34D3", 3 }, /* Intel Cherrytrail Whiskey Cove PMIC */
|
||||
};
|
||||
|
||||
bool acpi_quirk_skip_acpi_ac_and_battery(void)
|
||||
{
|
||||
const struct dmi_system_id *dmi_id;
|
||||
long quirks = 0;
|
||||
int i;
|
||||
|
||||
dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
|
||||
if (dmi_id)
|
||||
quirks = (unsigned long)dmi_id->driver_data;
|
||||
|
||||
if (quirks & ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY)
|
||||
return true;
|
||||
|
||||
if (quirks & ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY)
|
||||
return false;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(acpi_skip_ac_and_battery_pmic_ids); i++) {
|
||||
if (acpi_dev_present(acpi_skip_ac_and_battery_pmic_ids[i].hid, "1",
|
||||
acpi_skip_ac_and_battery_pmic_ids[i].hrv)) {
|
||||
pr_info_once("found native %s PMIC, skipping ACPI AC and battery devices\n",
|
||||
acpi_skip_ac_and_battery_pmic_ids[i].hid);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_quirk_skip_acpi_ac_and_battery);
|
||||
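The AC and battery drivers are the expected callers of this quirk check; a hedged sketch of how a driver init path might use it (the function below is illustrative, not taken from this commit):

static int example_ac_init(void)
{
	/* Defer to the native PMIC fuel-gauge/charger drivers. */
	if (acpi_quirk_skip_acpi_ac_and_battery())
		return -ENODEV;

	/* ... register the ACPI AC driver as usual ... */
	return 0;
}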
|
@ -69,7 +69,7 @@

#include <uapi/linux/android/binder.h>

#include <asm/cacheflush.h>
#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"
@ -1946,7 +1946,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
		case BINDER_TYPE_FD: {
			/*
			 * No need to close the file here since user-space
			 * closes it for for successfully delivered
			 * closes it for successfully delivered
			 * transactions. For transactions that weren't
			 * delivered, the new fd was never allocated so
			 * there is no need to close and the fput on the
@ -2233,16 +2233,258 @@ static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
	return ret;
}

static int binder_translate_fd_array(struct binder_fd_array_object *fda,
/**
 * struct binder_ptr_fixup - data to be fixed-up in target buffer
 * @offset	offset in target buffer to fixup
 * @skip_size	bytes to skip in copy (fixup will be written later)
 * @fixup_data	data to write at fixup offset
 * @node	list node
 *
 * This is used for the pointer fixup list (pf) which is created and consumed
 * during binder_transaction() and is only accessed locally. No
 * locking is necessary.
 *
 * The list is ordered by @offset.
 */
struct binder_ptr_fixup {
	binder_size_t offset;
	size_t skip_size;
	binder_uintptr_t fixup_data;
	struct list_head node;
};

/**
 * struct binder_sg_copy - scatter-gather data to be copied
 * @offset		offset in target buffer
 * @sender_uaddr	user address in source buffer
 * @length		bytes to copy
 * @node		list node
 *
 * This is used for the sg copy list (sgc) which is created and consumed
 * during binder_transaction() and is only accessed locally. No
 * locking is necessary.
 *
 * The list is ordered by @offset.
 */
struct binder_sg_copy {
	binder_size_t offset;
	const void __user *sender_uaddr;
	size_t length;
	struct list_head node;
};

/**
 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
 * @alloc:	binder_alloc associated with @buffer
 * @buffer:	binder buffer in target process
 * @sgc_head:	list_head of scatter-gather copy list
 * @pf_head:	list_head of pointer fixup list
 *
 * Processes all elements of @sgc_head, applying fixups from @pf_head
 * and copying the scatter-gather data from the source process' user
 * buffer to the target's buffer. It is expected that the list creation
 * and processing all occurs during binder_transaction() so these lists
 * are only accessed in local context.
 *
 * Return: 0=success, else -errno
 */
static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
					 struct binder_buffer *buffer,
					 struct list_head *sgc_head,
					 struct list_head *pf_head)
{
	int ret = 0;
	struct binder_sg_copy *sgc, *tmpsgc;
	struct binder_ptr_fixup *pf =
		list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
					 node);

	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
		size_t bytes_copied = 0;

		while (bytes_copied < sgc->length) {
			size_t copy_size;
			size_t bytes_left = sgc->length - bytes_copied;
			size_t offset = sgc->offset + bytes_copied;

			/*
			 * We copy up to the fixup (pointed to by pf)
			 */
			copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
				       : bytes_left;
			if (!ret && copy_size)
				ret = binder_alloc_copy_user_to_buffer(
						alloc, buffer,
						offset,
						sgc->sender_uaddr + bytes_copied,
						copy_size);
			bytes_copied += copy_size;
			if (copy_size != bytes_left) {
				BUG_ON(!pf);
				/* we stopped at a fixup offset */
				if (pf->skip_size) {
					/*
					 * we are just skipping. This is for
					 * BINDER_TYPE_FDA where the translated
					 * fds will be fixed up when we get
					 * to target context.
					 */
					bytes_copied += pf->skip_size;
				} else {
					/* apply the fixup indicated by pf */
					if (!ret)
						ret = binder_alloc_copy_to_buffer(
							alloc, buffer,
							pf->offset,
							&pf->fixup_data,
							sizeof(pf->fixup_data));
					bytes_copied += sizeof(pf->fixup_data);
				}
				list_del(&pf->node);
				kfree(pf);
				pf = list_first_entry_or_null(pf_head,
						struct binder_ptr_fixup, node);
			}
		}
		list_del(&sgc->node);
		kfree(sgc);
	}
	BUG_ON(!list_empty(pf_head));
	BUG_ON(!list_empty(sgc_head));

	return ret > 0 ? -EINVAL : ret;
}
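
To make the interplay of the two ordered lists easier to follow, here is a
stand-alone user-space model of the same walk: it copies a source buffer in
pieces, stopping at each fixup offset to either substitute the translated
value or leave a hole to be filled later. It is an illustration only, with
plain memcpy() standing in for the binder_alloc_*() helpers:

#include <stdint.h>
#include <string.h>

struct fixup {
	size_t offset;		/* where in the buffer the fixup applies */
	uint64_t data;		/* value to write when skip == 0 */
	size_t skip;		/* bytes to leave untouched when non-zero */
	struct fixup *next;	/* singly linked list, ordered by offset */
};

static void copy_with_fixups(uint8_t *dst, const uint8_t *src, size_t len,
			     const struct fixup *pf)
{
	size_t copied = 0;

	while (copied < len) {
		size_t left = len - copied;
		size_t chunk = left;

		if (pf && pf->offset - copied < left)
			chunk = pf->offset - copied;	/* copy up to the fixup */

		memcpy(dst + copied, src + copied, chunk);
		copied += chunk;

		if (chunk != left) {			/* stopped on a fixup */
			if (pf->skip) {
				copied += pf->skip;	/* hole, fixed up later */
			} else {
				memcpy(dst + copied, &pf->data, sizeof(pf->data));
				copied += sizeof(pf->data);
			}
			pf = pf->next;
		}
	}
}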

/**
 * binder_cleanup_deferred_txn_lists() - free specified lists
 * @sgc_head:	list_head of scatter-gather copy list
 * @pf_head:	list_head of pointer fixup list
 *
 * Called to clean up @sgc_head and @pf_head if there is an
 * error.
 */
static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
					      struct list_head *pf_head)
{
	struct binder_sg_copy *sgc, *tmpsgc;
	struct binder_ptr_fixup *pf, *tmppf;

	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
		list_del(&sgc->node);
		kfree(sgc);
	}
	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
		list_del(&pf->node);
		kfree(pf);
	}
}

/**
 * binder_defer_copy() - queue a scatter-gather buffer for copy
 * @sgc_head:		list_head of scatter-gather copy list
 * @offset:		binder buffer offset in target process
 * @sender_uaddr:	user address in source process
 * @length:		bytes to copy
 *
 * Specify a scatter-gather block to be copied. The actual copy must
 * be deferred until all the needed fixups are identified and queued.
 * Then the copy and fixups are done together so un-translated values
 * from the source are never visible in the target buffer.
 *
 * We are guaranteed that repeated calls to this function will have
 * monotonically increasing @offset values so the list will naturally
 * be ordered.
 *
 * Return: 0=success, else -errno
 */
static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
			     const void __user *sender_uaddr, size_t length)
{
	struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);

	if (!bc)
		return -ENOMEM;

	bc->offset = offset;
	bc->sender_uaddr = sender_uaddr;
	bc->length = length;
	INIT_LIST_HEAD(&bc->node);

	/*
	 * We are guaranteed that the deferred copies are in-order
	 * so just add to the tail.
	 */
	list_add_tail(&bc->node, sgc_head);

	return 0;
}

/**
 * binder_add_fixup() - queue a fixup to be applied to sg copy
 * @pf_head:	list_head of binder ptr fixup list
 * @offset:	binder buffer offset in target process
 * @fixup:	bytes to be copied for fixup
 * @skip_size:	bytes to skip when copying (fixup will be applied later)
 *
 * Add the specified fixup to a list ordered by @offset. When copying
 * the scatter-gather buffers, the fixup will be copied instead of
 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
 * will be applied later (in target process context), so we just skip
 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
 * value in @fixup.
 *
 * This function is called *mostly* in @offset order, but there are
 * exceptions. Since out-of-order inserts are relatively uncommon,
 * we insert the new element by searching backward from the tail of
 * the list.
 *
 * Return: 0=success, else -errno
 */
static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
			    binder_uintptr_t fixup, size_t skip_size)
{
	struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	struct binder_ptr_fixup *tmppf;

	if (!pf)
		return -ENOMEM;

	pf->offset = offset;
	pf->fixup_data = fixup;
	pf->skip_size = skip_size;
	INIT_LIST_HEAD(&pf->node);

	/* Fixups are *mostly* added in-order, but there are some
	 * exceptions. Look backwards through list for insertion point.
	 */
	list_for_each_entry_reverse(tmppf, pf_head, node) {
		if (tmppf->offset < pf->offset) {
			list_add(&pf->node, &tmppf->node);
			return 0;
		}
	}
	/*
	 * if we get here, then the new offset is the lowest so
	 * insert at the head
	 */
	list_add(&pf->node, pf_head);
	return 0;
}
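
For a single BINDER_TYPE_PTR object whose payload embeds one pointer that must
be rewritten for the target process, the expected call sequence is roughly the
fragment below; the offsets are whatever is live in binder_transaction() at
that point and the fragment is illustrative, not a complete path:

	/* Queue the payload bytes; nothing is copied yet. */
	ret = binder_defer_copy(&sgc_head, sg_buf_offset,
				(const void __user *)(uintptr_t)bp->buffer,
				bp->length);
	if (!ret)
		/* When the deferred copy reaches this offset, write the
		 * translated pointer instead of the sender's value
		 * (skip_size == 0 means "substitute the fixup data").
		 */
		ret = binder_add_fixup(&pf_head, buffer_offset, bp->buffer, 0);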
|
||||
static int binder_translate_fd_array(struct list_head *pf_head,
|
||||
struct binder_fd_array_object *fda,
|
||||
const void __user *sender_ubuffer,
|
||||
struct binder_buffer_object *parent,
|
||||
struct binder_buffer_object *sender_uparent,
|
||||
struct binder_transaction *t,
|
||||
struct binder_thread *thread,
|
||||
struct binder_transaction *in_reply_to)
|
||||
{
|
||||
binder_size_t fdi, fd_buf_size;
|
||||
binder_size_t fda_offset;
|
||||
const void __user *sender_ufda_base;
|
||||
struct binder_proc *proc = thread->proc;
|
||||
struct binder_proc *target_proc = t->to_proc;
|
||||
int ret;
|
||||
|
||||
fd_buf_size = sizeof(u32) * fda->num_fds;
|
||||
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
|
||||
@ -2266,19 +2508,25 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
|
||||
*/
|
||||
fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
|
||||
fda->parent_offset;
|
||||
if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
|
||||
sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
|
||||
fda->parent_offset;
|
||||
|
||||
if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
|
||||
!IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
|
||||
binder_user_error("%d:%d parent offset not aligned correctly.\n",
|
||||
proc->pid, thread->pid);
|
||||
return -EINVAL;
|
||||
}
|
||||
ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
for (fdi = 0; fdi < fda->num_fds; fdi++) {
|
||||
u32 fd;
|
||||
int ret;
|
||||
binder_size_t offset = fda_offset + fdi * sizeof(fd);
|
||||
binder_size_t sender_uoffset = fdi * sizeof(fd);
|
||||
|
||||
ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
|
||||
&fd, t->buffer,
|
||||
offset, sizeof(fd));
|
||||
ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
|
||||
if (!ret)
|
||||
ret = binder_translate_fd(fd, offset, t, thread,
|
||||
in_reply_to);
|
||||
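
Note that the loop above now reads each fd from the sender's user-space copy of
the parent buffer rather than from the target buffer: with the scatter-gather
copies deferred, the target buffer has not been populated yet at this point. A
reduced sketch of the per-fd step, error handling trimmed and variables assumed
to be those of the function above:

	u32 fd;

	/* Fetch the sender's fd value straight from its user memory ... */
	if (copy_from_user(&fd, sender_ufda_base + fdi * sizeof(fd), sizeof(fd)))
		return -EFAULT;

	/* ... and translate it; the translated fd is installed at the matching
	 * offset in the target buffer when the transaction is delivered.
	 */
	ret = binder_translate_fd(fd, fda_offset + fdi * sizeof(fd), t, thread,
				  in_reply_to);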
@ -2288,7 +2536,8 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int binder_fixup_parent(struct binder_transaction *t,
|
||||
static int binder_fixup_parent(struct list_head *pf_head,
|
||||
struct binder_transaction *t,
|
||||
struct binder_thread *thread,
|
||||
struct binder_buffer_object *bp,
|
||||
binder_size_t off_start_offset,
|
||||
@ -2334,14 +2583,7 @@ static int binder_fixup_parent(struct binder_transaction *t,
|
||||
}
|
||||
buffer_offset = bp->parent_offset +
|
||||
(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
|
||||
if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
|
||||
&bp->buffer, sizeof(bp->buffer))) {
|
||||
binder_user_error("%d:%d got transaction with invalid parent offset\n",
|
||||
proc->pid, thread->pid);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -2483,8 +2725,12 @@ static void binder_transaction(struct binder_proc *proc,
|
||||
int t_debug_id = atomic_inc_return(&binder_last_id);
|
||||
char *secctx = NULL;
|
||||
u32 secctx_sz = 0;
|
||||
struct list_head sgc_head;
|
||||
struct list_head pf_head;
|
||||
const void __user *user_buffer = (const void __user *)
|
||||
(uintptr_t)tr->data.ptr.buffer;
|
||||
INIT_LIST_HEAD(&sgc_head);
|
||||
INIT_LIST_HEAD(&pf_head);
|
||||
|
||||
e = binder_transaction_log_add(&binder_transaction_log);
|
||||
e->debug_id = t_debug_id;
|
||||
@ -2951,6 +3197,8 @@ static void binder_transaction(struct binder_proc *proc,
|
||||
case BINDER_TYPE_FDA: {
|
||||
struct binder_object ptr_object;
|
||||
binder_size_t parent_offset;
|
||||
struct binder_object user_object;
|
||||
size_t user_parent_size;
|
||||
struct binder_fd_array_object *fda =
|
||||
to_binder_fd_array_object(hdr);
|
||||
size_t num_valid = (buffer_offset - off_start_offset) /
|
||||
@ -2982,8 +3230,27 @@ static void binder_transaction(struct binder_proc *proc,
|
||||
return_error_line = __LINE__;
|
||||
goto err_bad_parent;
|
||||
}
|
||||
ret = binder_translate_fd_array(fda, parent, t, thread,
|
||||
in_reply_to);
|
||||
/*
|
||||
* We need to read the user version of the parent
|
||||
* object to get the original user offset
|
||||
*/
|
||||
user_parent_size =
|
||||
binder_get_object(proc, user_buffer, t->buffer,
|
||||
parent_offset, &user_object);
|
||||
if (user_parent_size != sizeof(user_object.bbo)) {
|
||||
binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
|
||||
proc->pid, thread->pid,
|
||||
user_parent_size,
|
||||
sizeof(user_object.bbo));
|
||||
return_error = BR_FAILED_REPLY;
|
||||
return_error_param = -EINVAL;
|
||||
return_error_line = __LINE__;
|
||||
goto err_bad_parent;
|
||||
}
|
||||
ret = binder_translate_fd_array(&pf_head, fda,
|
||||
user_buffer, parent,
|
||||
&user_object.bbo, t,
|
||||
thread, in_reply_to);
|
||||
if (!ret)
|
||||
ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
|
||||
t->buffer,
|
||||
@ -3013,19 +3280,14 @@ static void binder_transaction(struct binder_proc *proc,
|
||||
return_error_line = __LINE__;
|
||||
goto err_bad_offset;
|
||||
}
|
||||
if (binder_alloc_copy_user_to_buffer(
|
||||
&target_proc->alloc,
|
||||
t->buffer,
|
||||
sg_buf_offset,
|
||||
(const void __user *)
|
||||
(uintptr_t)bp->buffer,
|
||||
bp->length)) {
|
||||
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
|
||||
proc->pid, thread->pid);
|
||||
return_error_param = -EFAULT;
|
||||
ret = binder_defer_copy(&sgc_head, sg_buf_offset,
|
||||
(const void __user *)(uintptr_t)bp->buffer,
|
||||
bp->length);
|
||||
if (ret) {
|
||||
return_error = BR_FAILED_REPLY;
|
||||
return_error_param = ret;
|
||||
return_error_line = __LINE__;
|
||||
goto err_copy_data_failed;
|
||||
goto err_translate_failed;
|
||||
}
|
||||
/* Fixup buffer pointer to target proc address space */
|
||||
bp->buffer = (uintptr_t)
|
||||
@ -3034,7 +3296,8 @@ static void binder_transaction(struct binder_proc *proc,
|
||||
|
||||
num_valid = (buffer_offset - off_start_offset) /
|
||||
sizeof(binder_size_t);
|
||||
ret = binder_fixup_parent(t, thread, bp,
|
||||
ret = binder_fixup_parent(&pf_head, t,
|
||||
thread, bp,
|
||||
off_start_offset,
|
||||
num_valid,
|
||||
last_fixup_obj_off,
|
||||
@ -3074,6 +3337,17 @@ static void binder_transaction(struct binder_proc *proc,
|
||||
return_error_line = __LINE__;
|
||||
goto err_copy_data_failed;
|
||||
}
|
||||
|
||||
ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
|
||||
&sgc_head, &pf_head);
|
||||
if (ret) {
|
||||
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
|
||||
proc->pid, thread->pid);
|
||||
return_error = BR_FAILED_REPLY;
|
||||
return_error_param = ret;
|
||||
return_error_line = __LINE__;
|
||||
goto err_copy_data_failed;
|
||||
}
|
||||
if (t->buffer->oneway_spam_suspect)
|
||||
tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
|
||||
else
|
||||
@ -3147,6 +3421,7 @@ static void binder_transaction(struct binder_proc *proc,
|
||||
err_bad_offset:
|
||||
err_bad_parent:
|
||||
err_copy_data_failed:
|
||||
binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
|
||||
binder_free_txn_fixups(t);
|
||||
trace_binder_transaction_failed_buffer_release(t->buffer);
|
||||
binder_transaction_buffer_release(target_proc, NULL, t->buffer,
|
||||
|
@ -146,7 +146,7 @@ config SATA_AHCI_PLATFORM
|
||||
config AHCI_BRCM
|
||||
tristate "Broadcom AHCI SATA support"
|
||||
depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP || \
|
||||
ARCH_BCM_63XX
|
||||
ARCH_BCM_63XX || COMPILE_TEST
|
||||
select SATA_HOST
|
||||
help
|
||||
This option enables support for the AHCI SATA3 controller found on
|
||||
@ -156,7 +156,7 @@ config AHCI_BRCM
|
||||
|
||||
config AHCI_DA850
|
||||
tristate "DaVinci DA850 AHCI SATA support"
|
||||
depends on ARCH_DAVINCI_DA850
|
||||
depends on ARCH_DAVINCI_DA850 || COMPILE_TEST
|
||||
select SATA_HOST
|
||||
help
|
||||
This option enables support for the DaVinci DA850 SoC's
|
||||
@ -166,7 +166,7 @@ config AHCI_DA850
|
||||
|
||||
config AHCI_DM816
|
||||
tristate "DaVinci DM816 AHCI SATA support"
|
||||
depends on ARCH_OMAP2PLUS
|
||||
depends on ARCH_OMAP2PLUS || COMPILE_TEST
|
||||
select SATA_HOST
|
||||
help
|
||||
This option enables support for the DaVinci DM816 SoC's
|
||||
@ -206,7 +206,7 @@ config AHCI_CEVA
|
||||
|
||||
config AHCI_MTK
|
||||
tristate "MediaTek AHCI SATA support"
|
||||
depends on ARCH_MEDIATEK
|
||||
depends on ARCH_MEDIATEK || COMPILE_TEST
|
||||
select MFD_SYSCON
|
||||
select SATA_HOST
|
||||
help
|
||||
@ -217,7 +217,7 @@ config AHCI_MTK
|
||||
|
||||
config AHCI_MVEBU
|
||||
tristate "Marvell EBU AHCI SATA support"
|
||||
depends on ARCH_MVEBU
|
||||
depends on ARCH_MVEBU || COMPILE_TEST
|
||||
select SATA_HOST
|
||||
help
|
||||
This option enables support for the Marvebu EBU SoC's
|
||||
@ -236,7 +236,7 @@ config AHCI_OCTEON
|
||||
|
||||
config AHCI_SUNXI
|
||||
tristate "Allwinner sunxi AHCI SATA support"
|
||||
depends on ARCH_SUNXI
|
||||
depends on ARCH_SUNXI || COMPILE_TEST
|
||||
select SATA_HOST
|
||||
help
|
||||
This option enables support for the Allwinner sunxi SoC's
|
||||
@ -246,7 +246,7 @@ config AHCI_SUNXI
|
||||
|
||||
config AHCI_TEGRA
|
||||
tristate "NVIDIA Tegra AHCI SATA support"
|
||||
depends on ARCH_TEGRA
|
||||
depends on ARCH_TEGRA || COMPILE_TEST
|
||||
select SATA_HOST
|
||||
help
|
||||
This option enables support for the NVIDIA Tegra SoC's
|
||||
@ -256,7 +256,7 @@ config AHCI_TEGRA
|
||||
|
||||
config AHCI_XGENE
|
||||
tristate "APM X-Gene 6.0Gbps AHCI SATA host controller support"
|
||||
depends on PHY_XGENE
|
||||
depends on PHY_XGENE || COMPILE_TEST
|
||||
select SATA_HOST
|
||||
help
|
||||
This option enables support for APM X-Gene SoC SATA host controller.
|
||||
@ -273,7 +273,7 @@ config AHCI_QORIQ
|
||||
|
||||
config SATA_FSL
|
||||
tristate "Freescale 3.0Gbps SATA support"
|
||||
depends on FSL_SOC
|
||||
depends on FSL_SOC || COMPILE_TEST
|
||||
select SATA_HOST
|
||||
help
|
||||
This option enables support for Freescale 3.0Gbps SATA controller.
|
||||
@ -294,7 +294,7 @@ config SATA_GEMINI
|
||||
|
||||
config SATA_AHCI_SEATTLE
|
||||
tristate "AMD Seattle 6.0Gbps AHCI SATA host controller support"
|
||||
depends on ARCH_SEATTLE
|
||||
depends on ARCH_SEATTLE || COMPILE_TEST
|
||||
select SATA_HOST
|
||||
help
|
||||
This option enables support for AMD Seattle SATA host controller.
|
||||
@ -432,18 +432,6 @@ config SATA_DWC_OLD_DMA
|
||||
This option enables support for old device trees without the
|
||||
"dmas" property.
|
||||
|
||||
config SATA_DWC_DEBUG
|
||||
bool "Debugging driver version"
|
||||
depends on SATA_DWC
|
||||
help
|
||||
This option enables debugging output in the driver.
|
||||
|
||||
config SATA_DWC_VDEBUG
|
||||
bool "Verbose debug output"
|
||||
depends on SATA_DWC_DEBUG
|
||||
help
|
||||
This option enables the taskfile dumping and NCQ debugging.
|
||||
|
||||
config SATA_HIGHBANK
|
||||
tristate "Calxeda Highbank SATA support"
|
||||
depends on ARCH_HIGHBANK || COMPILE_TEST
|
||||
@ -611,7 +599,7 @@ config PATA_ATP867X
|
||||
|
||||
config PATA_BK3710
|
||||
tristate "Palmchip BK3710 PATA support"
|
||||
depends on ARCH_DAVINCI
|
||||
depends on ARCH_DAVINCI || COMPILE_TEST
|
||||
select PATA_TIMINGS
|
||||
help
|
||||
This option enables support for the integrated IDE controller on
|
||||
@ -649,7 +637,7 @@ config PATA_CS5530
|
||||
|
||||
config PATA_CS5535
|
||||
tristate "CS5535 PATA support (Experimental)"
|
||||
depends on PCI && X86_32
|
||||
depends on PCI && (X86_32 || (X86_64 && COMPILE_TEST))
|
||||
help
|
||||
This option enables support for the NatSemi/AMD CS5535
|
||||
companion chip used with the Geode processor family.
|
||||
@ -697,7 +685,7 @@ config PATA_EP93XX
|
||||
config PATA_FTIDE010
|
||||
tristate "Faraday Technology FTIDE010 PATA support"
|
||||
depends on OF
|
||||
depends on ARM
|
||||
depends on ARM || COMPILE_TEST
|
||||
depends on SATA_GEMINI
|
||||
help
|
||||
This option enables support for the Faraday FTIDE010
|
||||
@ -760,7 +748,7 @@ config PATA_ICSIDE
|
||||
|
||||
config PATA_IMX
|
||||
tristate "PATA support for Freescale iMX"
|
||||
depends on ARCH_MXC
|
||||
depends on ARCH_MXC || COMPILE_TEST
|
||||
select PATA_TIMINGS
|
||||
help
|
||||
This option enables support for the PATA host available on Freescale
|
||||
@ -981,7 +969,7 @@ config PATA_VIA
|
||||
|
||||
config PATA_PXA
|
||||
tristate "PXA DMA-capable PATA support"
|
||||
depends on ARCH_PXA
|
||||
depends on ARCH_PXA || COMPILE_TEST
|
||||
help
|
||||
This option enables support for harddrive attached to PXA CPU's bus.
|
||||
|
||||
@ -1157,7 +1145,7 @@ config PATA_RZ1000
|
||||
|
||||
config PATA_SAMSUNG_CF
|
||||
tristate "Samsung SoC PATA support"
|
||||
depends on SAMSUNG_DEV_IDE
|
||||
depends on SAMSUNG_DEV_IDE || COMPILE_TEST
|
||||
select PATA_TIMINGS
|
||||
help
|
||||
This option enables basic support for Samsung's S3C/S5P board
|
||||
|
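
The Kconfig changes above only relax the build dependencies so these drivers
can be compile-tested on other architectures. The usual companion pattern in
driver code, sketched here with a made-up option name, is to prefer
IS_ENABLED() over preprocessor guards so that both branches are type-checked
on COMPILE_TEST builds:

static void example_apply_soc_quirk(struct ahci_host_priv *hpriv)
{
	/*
	 * Compiled everywhere; the branch is simply dead code when
	 * CONFIG_EXAMPLE_SOC_QUIRK (hypothetical) is not set.
	 */
	if (IS_ENABLED(CONFIG_EXAMPLE_SOC_QUIRK))
		hpriv->flags |= AHCI_HFLAG_NO_NCQ;
}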
@ -185,8 +185,6 @@ static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
|
||||
struct acard_sg *acard_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
|
||||
unsigned int si, last_si = 0;
|
||||
|
||||
VPRINTK("ENTER\n");
|
||||
|
||||
/*
|
||||
* Next, the S/G list.
|
||||
*/
|
||||
@ -362,8 +360,6 @@ static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id
|
||||
struct ata_host *host;
|
||||
int n_ports, i, rc;
|
||||
|
||||
VPRINTK("ENTER\n");
|
||||
|
||||
WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS);
|
||||
|
||||
ata_print_version_once(&pdev->dev, DRV_VERSION);
|
||||
|
@ -51,6 +51,7 @@ enum board_ids {
|
||||
board_ahci,
|
||||
board_ahci_ign_iferr,
|
||||
board_ahci_mobile,
|
||||
board_ahci_no_debounce_delay,
|
||||
board_ahci_nomsi,
|
||||
board_ahci_noncq,
|
||||
board_ahci_nosntf,
|
||||
@ -141,6 +142,13 @@ static const struct ata_port_info ahci_port_info[] = {
|
||||
.udma_mask = ATA_UDMA6,
|
||||
.port_ops = &ahci_ops,
|
||||
},
|
||||
[board_ahci_no_debounce_delay] = {
|
||||
.flags = AHCI_FLAG_COMMON,
|
||||
.link_flags = ATA_LFLAG_NO_DEBOUNCE_DELAY,
|
||||
.pio_mask = ATA_PIO4,
|
||||
.udma_mask = ATA_UDMA6,
|
||||
.port_ops = &ahci_ops,
|
||||
},
|
||||
[board_ahci_nomsi] = {
|
||||
AHCI_HFLAGS (AHCI_HFLAG_NO_MSI),
|
||||
.flags = AHCI_FLAG_COMMON,
|
||||
@ -258,7 +266,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
|
||||
{ PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
|
||||
{ PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
|
||||
{ PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
|
||||
{ PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8 */
|
||||
{ PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8/Lewisburg RAID*/
|
||||
{ PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
|
||||
{ PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
|
||||
{ PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
|
||||
@ -316,7 +324,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
|
||||
{ PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
|
||||
{ PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
|
||||
{ PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
|
||||
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */
|
||||
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG/Lewisburg RAID*/
|
||||
{ PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
|
||||
{ PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */
|
||||
{ PCI_VDEVICE(INTEL, 0x1e03), board_ahci_mobile }, /* Panther M AHCI */
|
||||
@ -358,8 +366,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
|
||||
{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
|
||||
{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
|
||||
{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
|
||||
{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
|
||||
{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
|
||||
{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg/Lewisburg AHCI*/
|
||||
{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg/Lewisburg RAID*/
|
||||
{ PCI_VDEVICE(INTEL, 0x43d4), board_ahci }, /* Rocket Lake PCH-H RAID */
|
||||
{ PCI_VDEVICE(INTEL, 0x43d5), board_ahci }, /* Rocket Lake PCH-H RAID */
|
||||
{ PCI_VDEVICE(INTEL, 0x43d6), board_ahci }, /* Rocket Lake PCH-H RAID */
|
||||
@ -394,10 +402,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
|
||||
{ PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
|
||||
{ PCI_VDEVICE(INTEL, 0xa107), board_ahci_mobile }, /* Sunrise M RAID */
|
||||
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
|
||||
{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
|
||||
{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
|
||||
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
|
||||
{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/
|
||||
{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
|
||||
{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
|
||||
{ PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
|
||||
@ -441,6 +445,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
|
||||
board_ahci_al },
|
||||
/* AMD */
|
||||
{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
|
||||
{ PCI_VDEVICE(AMD, 0x7801), board_ahci_no_debounce_delay }, /* AMD Hudson-2 (AHCI mode) */
|
||||
{ PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
|
||||
{ PCI_VDEVICE(AMD, 0x7901), board_ahci_mobile }, /* AMD Green Sardine */
|
||||
/* AMD is using RAID class only for ahci controllers */
|
||||
@ -593,6 +598,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
|
||||
{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
|
||||
{ PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci }, /* ASM1061R */
|
||||
{ PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci }, /* ASM1062R */
|
||||
{ PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci }, /* ASM1062+JMB575 */
|
||||
|
||||
/*
|
||||
* Samsung SSDs found on some macbooks. NCQ times out if MSI is
|
||||
@ -687,7 +693,7 @@ static void ahci_pci_init_controller(struct ata_host *host)
|
||||
|
||||
/* clear port IRQ */
|
||||
tmp = readl(port_mmio + PORT_IRQ_STAT);
|
||||
VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
|
||||
dev_dbg(&pdev->dev, "PORT_IRQ_STAT 0x%x\n", tmp);
|
||||
if (tmp)
|
||||
writel(tmp, port_mmio + PORT_IRQ_STAT);
|
||||
}
|
||||
@ -703,8 +709,6 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
|
||||
bool online;
|
||||
int rc;
|
||||
|
||||
DPRINTK("ENTER\n");
|
||||
|
||||
hpriv->stop_engine(ap);
|
||||
|
||||
rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
|
||||
@ -712,8 +716,6 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
|
||||
|
||||
hpriv->start_engine(ap);
|
||||
|
||||
DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
|
||||
|
||||
/* vt8251 doesn't clear BSY on signature FIS reception,
|
||||
* request follow-up softreset.
|
||||
*/
|
||||
@ -793,8 +795,6 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
|
||||
bool online;
|
||||
int rc, i;
|
||||
|
||||
DPRINTK("ENTER\n");
|
||||
|
||||
hpriv->stop_engine(ap);
|
||||
|
||||
for (i = 0; i < 2; i++) {
|
||||
@ -832,7 +832,6 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
|
||||
if (online)
|
||||
*class = ahci_dev_classify(ap);
|
||||
|
||||
DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -1479,7 +1478,6 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
|
||||
u32 irq_stat, irq_masked;
|
||||
unsigned int handled = 1;
|
||||
|
||||
VPRINTK("ENTER\n");
|
||||
hpriv = host->private_data;
|
||||
mmio = hpriv->mmio;
|
||||
irq_stat = readl(mmio + HOST_IRQ_STAT);
|
||||
@ -1496,7 +1494,6 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
|
||||
irq_stat = readl(mmio + HOST_IRQ_STAT);
|
||||
spin_unlock(&host->lock);
|
||||
} while (irq_stat);
|
||||
VPRINTK("EXIT\n");
|
||||
|
||||
return IRQ_RETVAL(handled);
|
||||
}
|
||||
@ -1660,7 +1657,7 @@ static ssize_t remapped_nvme_show(struct device *dev,
|
||||
struct ata_host *host = dev_get_drvdata(dev);
|
||||
struct ahci_host_priv *hpriv = host->private_data;
|
||||
|
||||
return sprintf(buf, "%u\n", hpriv->remapped_nvme);
|
||||
return sysfs_emit(buf, "%u\n", hpriv->remapped_nvme);
|
||||
}
|
||||
|
||||
static DEVICE_ATTR_RO(remapped_nvme);
|
||||
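
sysfs_emit() is preferred over sprintf() for single-value sysfs attributes
because it verifies the output buffer is a sysfs page and bounds the write to
PAGE_SIZE. A minimal show() callback in the new style, using a hypothetical
attribute name:

static ssize_t port_count_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct ata_host *host = dev_get_drvdata(dev);

	/* sysfs_emit() never writes past the sysfs page it is handed. */
	return sysfs_emit(buf, "%d\n", host->n_ports);
}
static DEVICE_ATTR_RO(port_count);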
@ -1676,8 +1673,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
int n_ports, i, rc;
|
||||
int ahci_pci_bar = AHCI_PCI_BAR_STANDARD;
|
||||
|
||||
VPRINTK("ENTER\n");
|
||||
|
||||
WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS);
|
||||
|
||||
ata_print_version_once(&pdev->dev, DRV_VERSION);
|
||||
|
@ -376,8 +376,8 @@ struct ahci_host_priv {
|
||||
|
||||
extern int ahci_ignore_sss;
|
||||
|
||||
extern struct device_attribute *ahci_shost_attrs[];
|
||||
extern struct device_attribute *ahci_sdev_attrs[];
|
||||
extern const struct attribute_group *ahci_shost_groups[];
|
||||
extern const struct attribute_group *ahci_sdev_groups[];
|
||||
|
||||
/*
|
||||
* This must be instantiated by the edge drivers. Read the comments
|
||||
@ -388,8 +388,8 @@ extern struct device_attribute *ahci_sdev_attrs[];
|
||||
.can_queue = AHCI_MAX_CMDS, \
|
||||
.sg_tablesize = AHCI_MAX_SG, \
|
||||
.dma_boundary = AHCI_DMA_BOUNDARY, \
|
||||
.shost_attrs = ahci_shost_attrs, \
|
||||
.sdev_attrs = ahci_sdev_attrs, \
|
||||
.shost_groups = ahci_shost_groups, \
|
||||
.sdev_groups = ahci_sdev_groups, \
|
||||
.change_queue_depth = ata_scsi_change_queue_depth, \
|
||||
.tag_alloc_policy = BLK_TAG_ALLOC_RR, \
|
||||
.slave_configure = ata_scsi_slave_config
|
||||
|
@ -246,7 +246,7 @@ static void brcm_sata_init(struct brcm_ahci_priv *priv)
|
||||
}
|
||||
|
||||
static unsigned int brcm_ahci_read_id(struct ata_device *dev,
|
||||
struct ata_taskfile *tf, u16 *id)
|
||||
struct ata_taskfile *tf, __le16 *id)
|
||||
{
|
||||
struct ata_port *ap = dev->link->ap;
|
||||
struct ata_host *host = ap->host;
|
||||
@ -333,7 +333,7 @@ static struct ata_port_operations ahci_brcm_platform_ops = {
|
||||
|
||||
static const struct ata_port_info ahci_brcm_port_info = {
|
||||
.flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
|
||||
.link_flags = ATA_LFLAG_NO_DB_DELAY,
|
||||
.link_flags = ATA_LFLAG_NO_DEBOUNCE_DELAY,
|
||||
.pio_mask = ATA_PIO4,
|
||||
.udma_mask = ATA_UDMA6,
|
||||
.port_ops = &ahci_brcm_platform_ops,
|
||||
|
@ -92,7 +92,7 @@ struct ceva_ahci_priv {
|
||||
};
|
||||
|
||||
static unsigned int ceva_ahci_read_id(struct ata_device *dev,
|
||||
struct ata_taskfile *tf, u16 *id)
|
||||
struct ata_taskfile *tf, __le16 *id)
|
||||
{
|
||||
u32 err_mask;
|
||||
|
||||
|
@ -103,8 +103,6 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class,
|
||||
int rc;
|
||||
bool ls1021a_workaround = (qoriq_priv->type == AHCI_LS1021A);
|
||||
|
||||
DPRINTK("ENTER\n");
|
||||
|
||||
hpriv->stop_engine(ap);
|
||||
|
||||
/*
|
||||
@ -146,8 +144,6 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class,
|
||||
|
||||
if (online)
|
||||
*class = ahci_dev_classify(ap);
|
||||
|
||||
DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
@ -193,7 +193,7 @@ static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
|
||||
struct xgene_ahci_context *ctx = hpriv->plat_data;
|
||||
int rc = 0;
|
||||
u32 port_fbs;
|
||||
void *port_mmio = ahci_port_base(ap);
|
||||
void __iomem *port_mmio = ahci_port_base(ap);
|
||||
|
||||
/*
|
||||
* Write the pmp value to PxFBS.DEV
|
||||
@ -237,7 +237,7 @@ static bool xgene_ahci_is_memram_inited(struct xgene_ahci_context *ctx)
|
||||
* does not support DEVSLP.
|
||||
*/
|
||||
static unsigned int xgene_ahci_read_id(struct ata_device *dev,
|
||||
struct ata_taskfile *tf, u16 *id)
|
||||
struct ata_taskfile *tf, __le16 *id)
|
||||
{
|
||||
u32 err_mask;
|
||||
|
||||
@ -454,7 +454,7 @@ static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
|
||||
int pmp = sata_srst_pmp(link);
|
||||
struct ata_port *ap = link->ap;
|
||||
u32 rc;
|
||||
void *port_mmio = ahci_port_base(ap);
|
||||
void __iomem *port_mmio = ahci_port_base(ap);
|
||||
u32 port_fbs;
|
||||
|
||||
/*
|
||||
@ -499,7 +499,7 @@ static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class,
|
||||
struct ata_port *ap = link->ap;
|
||||
struct ahci_host_priv *hpriv = ap->host->private_data;
|
||||
struct xgene_ahci_context *ctx = hpriv->plat_data;
|
||||
void *port_mmio = ahci_port_base(ap);
|
||||
void __iomem *port_mmio = ahci_port_base(ap);
|
||||
u32 port_fbs;
|
||||
u32 port_fbs_save;
|
||||
u32 retry = 1;
|
||||
@ -588,8 +588,6 @@ static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
|
||||
void __iomem *mmio;
|
||||
u32 irq_stat, irq_masked;
|
||||
|
||||
VPRINTK("ENTER\n");
|
||||
|
||||
hpriv = host->private_data;
|
||||
mmio = hpriv->mmio;
|
||||
|
||||
@ -612,8 +610,6 @@ static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
|
||||
|
||||
spin_unlock(&host->lock);
|
||||
|
||||
VPRINTK("EXIT\n");
|
||||
|
||||
return IRQ_RETVAL(rc);
|
||||
}
|
||||
|
||||
|
@ -77,6 +77,7 @@
|
||||
#include <scsi/scsi_host.h>
|
||||
#include <linux/libata.h>
|
||||
#include <linux/dmi.h>
|
||||
#include <trace/events/libata.h>
|
||||
|
||||
#define DRV_NAME "ata_piix"
|
||||
#define DRV_VERSION "2.13"
|
||||
@ -816,10 +817,15 @@ static int piix_sidpr_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
|
||||
|
||||
static bool piix_irq_check(struct ata_port *ap)
|
||||
{
|
||||
unsigned char host_stat;
|
||||
|
||||
if (unlikely(!ap->ioaddr.bmdma_addr))
|
||||
return false;
|
||||
|
||||
return ap->ops->bmdma_status(ap) & ATA_DMA_INTR;
|
||||
host_stat = ap->ops->bmdma_status(ap);
|
||||
trace_ata_bmdma_status(ap, host_stat);
|
||||
|
||||
return host_stat & ATA_DMA_INTR;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
@ -1085,14 +1091,16 @@ static struct ata_port_operations ich_pata_ops = {
|
||||
.set_dmamode = ich_set_dmamode,
|
||||
};
|
||||
|
||||
static struct device_attribute *piix_sidpr_shost_attrs[] = {
|
||||
&dev_attr_link_power_management_policy,
|
||||
static struct attribute *piix_sidpr_shost_attrs[] = {
|
||||
&dev_attr_link_power_management_policy.attr,
|
||||
NULL
|
||||
};
|
||||
|
||||
ATTRIBUTE_GROUPS(piix_sidpr_shost);
|
||||
|
||||
static struct scsi_host_template piix_sidpr_sht = {
|
||||
ATA_BMDMA_SHT(DRV_NAME),
|
||||
.shost_attrs = piix_sidpr_shost_attrs,
|
||||
.shost_groups = piix_sidpr_shost_groups,
|
||||
};
|
||||
|
||||
static struct ata_port_operations piix_sidpr_sata_ops = {
|
||||
@ -1343,7 +1351,6 @@ static void piix_init_pcs(struct ata_host *host,
|
||||
new_pcs = pcs | map_db->port_enable;
|
||||
|
||||
if (new_pcs != pcs) {
|
||||
DPRINTK("updating PCS from 0x%x to 0x%x\n", pcs, new_pcs);
|
||||
pci_write_config_word(pdev, ICH5_PCS, new_pcs);
|
||||
msleep(150);
|
||||
}
|
||||
@ -1767,14 +1774,12 @@ static int __init piix_init(void)
|
||||
{
|
||||
int rc;
|
||||
|
||||
DPRINTK("pci_register_driver\n");
|
||||
rc = pci_register_driver(&piix_pci_driver);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
in_module_init = 0;
|
||||
|
||||
DPRINTK("done\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -108,28 +108,46 @@ static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
|
||||
ahci_read_em_buffer, ahci_store_em_buffer);
|
||||
static DEVICE_ATTR(em_message_supported, S_IRUGO, ahci_show_em_supported, NULL);
|
||||
|
||||
struct device_attribute *ahci_shost_attrs[] = {
|
||||
&dev_attr_link_power_management_policy,
|
||||
&dev_attr_em_message_type,
|
||||
&dev_attr_em_message,
|
||||
&dev_attr_ahci_host_caps,
|
||||
&dev_attr_ahci_host_cap2,
|
||||
&dev_attr_ahci_host_version,
|
||||
&dev_attr_ahci_port_cmd,
|
||||
&dev_attr_em_buffer,
|
||||
&dev_attr_em_message_supported,
|
||||
static struct attribute *ahci_shost_attrs[] = {
|
||||
&dev_attr_link_power_management_policy.attr,
|
||||
&dev_attr_em_message_type.attr,
|
||||
&dev_attr_em_message.attr,
|
||||
&dev_attr_ahci_host_caps.attr,
|
||||
&dev_attr_ahci_host_cap2.attr,
|
||||
&dev_attr_ahci_host_version.attr,
|
||||
&dev_attr_ahci_port_cmd.attr,
|
||||
&dev_attr_em_buffer.attr,
|
||||
&dev_attr_em_message_supported.attr,
|
||||
NULL
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(ahci_shost_attrs);
|
||||
|
||||
struct device_attribute *ahci_sdev_attrs[] = {
|
||||
&dev_attr_sw_activity,
|
||||
&dev_attr_unload_heads,
|
||||
&dev_attr_ncq_prio_supported,
|
||||
&dev_attr_ncq_prio_enable,
|
||||
static const struct attribute_group ahci_shost_attr_group = {
|
||||
.attrs = ahci_shost_attrs
|
||||
};
|
||||
|
||||
const struct attribute_group *ahci_shost_groups[] = {
|
||||
&ahci_shost_attr_group,
|
||||
NULL
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(ahci_sdev_attrs);
|
||||
EXPORT_SYMBOL_GPL(ahci_shost_groups);
|
||||
|
||||
static struct attribute *ahci_sdev_attrs[] = {
|
||||
&dev_attr_sw_activity.attr,
|
||||
&dev_attr_unload_heads.attr,
|
||||
&dev_attr_ncq_prio_supported.attr,
|
||||
&dev_attr_ncq_prio_enable.attr,
|
||||
NULL
|
||||
};
|
||||
|
||||
static const struct attribute_group ahci_sdev_attr_group = {
|
||||
.attrs = ahci_sdev_attrs
|
||||
};
|
||||
|
||||
const struct attribute_group *ahci_sdev_groups[] = {
|
||||
&ahci_sdev_attr_group,
|
||||
NULL
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(ahci_sdev_groups);
|
||||
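
The open-coded attribute_group wrappers above are exactly what the
ATTRIBUTE_GROUPS() helper expands to; the ata_piix change earlier in this
series uses the macro directly. A minimal sketch with a hypothetical attribute
array name:

static struct attribute *example_shost_attrs[] = {
	&dev_attr_link_power_management_policy.attr,
	NULL
};
/* Generates example_shost_group and example_shost_groups[] for us. */
ATTRIBUTE_GROUPS(example_shost);

static struct scsi_host_template example_sht = {
	.shost_groups	= example_shost_groups,
};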
|
||||
struct ata_port_operations ahci_ops = {
|
||||
.inherits = &sata_pmp_port_ops,
|
||||
@ -1216,12 +1234,12 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
|
||||
|
||||
/* clear SError */
|
||||
tmp = readl(port_mmio + PORT_SCR_ERR);
|
||||
VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
|
||||
dev_dbg(dev, "PORT_SCR_ERR 0x%x\n", tmp);
|
||||
writel(tmp, port_mmio + PORT_SCR_ERR);
|
||||
|
||||
/* clear port IRQ */
|
||||
tmp = readl(port_mmio + PORT_IRQ_STAT);
|
||||
VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
|
||||
dev_dbg(dev, "PORT_IRQ_STAT 0x%x\n", tmp);
|
||||
if (tmp)
|
||||
writel(tmp, port_mmio + PORT_IRQ_STAT);
|
||||
|
||||
@ -1252,10 +1270,10 @@ void ahci_init_controller(struct ata_host *host)
|
||||
}
|
||||
|
||||
tmp = readl(mmio + HOST_CTL);
|
||||
VPRINTK("HOST_CTL 0x%x\n", tmp);
|
||||
dev_dbg(host->dev, "HOST_CTL 0x%x\n", tmp);
|
||||
writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
|
||||
tmp = readl(mmio + HOST_CTL);
|
||||
VPRINTK("HOST_CTL 0x%x\n", tmp);
|
||||
dev_dbg(host->dev, "HOST_CTL 0x%x\n", tmp);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ahci_init_controller);
|
||||
|
||||
@ -1282,7 +1300,7 @@ unsigned int ahci_dev_classify(struct ata_port *ap)
|
||||
tf.lbal = (tmp >> 8) & 0xff;
|
||||
tf.nsect = (tmp) & 0xff;
|
||||
|
||||
return ata_dev_classify(&tf);
|
||||
return ata_port_classify(ap, &tf);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ahci_dev_classify);
|
||||
|
||||
@ -1397,8 +1415,6 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
|
||||
bool fbs_disabled = false;
|
||||
int rc;
|
||||
|
||||
DPRINTK("ENTER\n");
|
||||
|
||||
/* prepare for SRST (AHCI-1.1 10.4.1) */
|
||||
rc = ahci_kick_engine(ap);
|
||||
if (rc && rc != -EOPNOTSUPP)
|
||||
@ -1458,7 +1474,6 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
|
||||
if (fbs_disabled)
|
||||
ahci_enable_fbs(ap);
|
||||
|
||||
DPRINTK("EXIT, class=%u\n", *class);
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
@ -1480,8 +1495,6 @@ static int ahci_softreset(struct ata_link *link, unsigned int *class,
|
||||
{
|
||||
int pmp = sata_srst_pmp(link);
|
||||
|
||||
DPRINTK("ENTER\n");
|
||||
|
||||
return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ahci_do_softreset);
|
||||
@ -1511,8 +1524,6 @@ static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
|
||||
int rc;
|
||||
u32 irq_sts;
|
||||
|
||||
DPRINTK("ENTER\n");
|
||||
|
||||
rc = ahci_do_softreset(link, class, pmp, deadline,
|
||||
ahci_bad_pmp_check_ready);
|
||||
|
||||
@ -1546,8 +1557,6 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
|
||||
struct ata_taskfile tf;
|
||||
int rc;
|
||||
|
||||
DPRINTK("ENTER\n");
|
||||
|
||||
hpriv->stop_engine(ap);
|
||||
|
||||
/* clear D2H reception area to properly wait for D2H FIS */
|
||||
@ -1563,7 +1572,6 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
|
||||
if (*online)
|
||||
*class = ahci_dev_classify(ap);
|
||||
|
||||
DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
|
||||
return rc;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ahci_do_hardreset);
|
||||
@ -1602,8 +1610,6 @@ static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
|
||||
struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
|
||||
unsigned int si;
|
||||
|
||||
VPRINTK("ENTER\n");
|
||||
|
||||
/*
|
||||
* Next, the S/G list.
|
||||
*/
|
||||
@ -1677,7 +1683,6 @@ static void ahci_fbs_dec_intr(struct ata_port *ap)
|
||||
u32 fbs = readl(port_mmio + PORT_FBS);
|
||||
int retries = 3;
|
||||
|
||||
DPRINTK("ENTER\n");
|
||||
BUG_ON(!pp->fbs_enabled);
|
||||
|
||||
/* time to wait for DEC is not specified by AHCI spec,
|
||||
@ -1906,8 +1911,6 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance)
|
||||
void __iomem *port_mmio = ahci_port_base(ap);
|
||||
u32 status;
|
||||
|
||||
VPRINTK("ENTER\n");
|
||||
|
||||
status = readl(port_mmio + PORT_IRQ_STAT);
|
||||
writel(status, port_mmio + PORT_IRQ_STAT);
|
||||
|
||||
@ -1915,8 +1918,6 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance)
|
||||
ahci_handle_port_interrupt(ap, port_mmio, status);
|
||||
spin_unlock(ap->lock);
|
||||
|
||||
VPRINTK("EXIT\n");
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
@ -1933,9 +1934,7 @@ u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
|
||||
ap = host->ports[i];
|
||||
if (ap) {
|
||||
ahci_port_intr(ap);
|
||||
VPRINTK("port %u\n", i);
|
||||
} else {
|
||||
VPRINTK("port %u (no irq)\n", i);
|
||||
if (ata_ratelimit())
|
||||
dev_warn(host->dev,
|
||||
"interrupt on disabled port %u\n", i);
|
||||
@ -1956,8 +1955,6 @@ static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance)
|
||||
void __iomem *mmio;
|
||||
u32 irq_stat, irq_masked;
|
||||
|
||||
VPRINTK("ENTER\n");
|
||||
|
||||
hpriv = host->private_data;
|
||||
mmio = hpriv->mmio;
|
||||
|
||||
@ -1985,8 +1982,6 @@ static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance)
|
||||
|
||||
spin_unlock(&host->lock);
|
||||
|
||||
VPRINTK("EXIT\n");
|
||||
|
||||
return IRQ_RETVAL(rc);
|
||||
}
|
||||
|
||||
|
@ -579,11 +579,8 @@ int ahci_platform_init_host(struct platform_device *pdev,
|
||||
int i, irq, n_ports, rc;
|
||||
|
||||
irq = platform_get_irq(pdev, 0);
|
||||
if (irq < 0) {
|
||||
if (irq != -EPROBE_DEFER)
|
||||
dev_err(dev, "no irq\n");
|
||||
if (irq < 0)
|
||||
return irq;
|
||||
}
|
||||
if (!irq)
|
||||
return -EINVAL;
|
||||
|
||||
@ -642,13 +639,8 @@ int ahci_platform_init_host(struct platform_device *pdev,
|
||||
if (hpriv->cap & HOST_CAP_64) {
|
||||
rc = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
|
||||
if (rc) {
|
||||
rc = dma_coerce_mask_and_coherent(dev,
|
||||
DMA_BIT_MASK(32));
|
||||
if (rc) {
|
||||
dev_err(dev, "Failed to enable 64-bit DMA.\n");
|
||||
return rc;
|
||||
}
|
||||
dev_warn(dev, "Enable 32-bit DMA instead of 64-bit.\n");
|
||||
dev_err(dev, "Failed to enable 64-bit DMA.\n");
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -402,7 +402,6 @@ EXPORT_SYMBOL_GPL(ata_acpi_stm);
|
||||
*/
|
||||
static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
|
||||
{
|
||||
struct ata_port *ap = dev->link->ap;
|
||||
acpi_status status;
|
||||
struct acpi_buffer output;
|
||||
union acpi_object *out_obj;
|
||||
@ -418,10 +417,6 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
|
||||
output.length = ACPI_ALLOCATE_BUFFER;
|
||||
output.pointer = NULL; /* ACPI-CA sets this; save/free it later */
|
||||
|
||||
if (ata_msg_probe(ap))
|
||||
ata_dev_dbg(dev, "%s: ENTER: port#: %d\n",
|
||||
__func__, ap->port_no);
|
||||
|
||||
/* _GTF has no input parameters */
|
||||
status = acpi_evaluate_object(ata_dev_acpi_handle(dev), "_GTF", NULL,
|
||||
&output);
|
||||
@ -437,11 +432,9 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
|
||||
}
|
||||
|
||||
if (!output.length || !output.pointer) {
|
||||
if (ata_msg_probe(ap))
|
||||
ata_dev_dbg(dev, "%s: Run _GTF: length or ptr is NULL (0x%llx, 0x%p)\n",
|
||||
__func__,
|
||||
(unsigned long long)output.length,
|
||||
output.pointer);
|
||||
ata_dev_dbg(dev, "Run _GTF: length or ptr is NULL (0x%llx, 0x%p)\n",
|
||||
(unsigned long long)output.length,
|
||||
output.pointer);
|
||||
rc = -EINVAL;
|
||||
goto out_free;
|
||||
}
|
||||
@ -464,9 +457,8 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
|
||||
rc = out_obj->buffer.length / REGS_PER_GTF;
|
||||
if (gtf) {
|
||||
*gtf = (void *)out_obj->buffer.pointer;
|
||||
if (ata_msg_probe(ap))
|
||||
ata_dev_dbg(dev, "%s: returning gtf=%p, gtf_count=%d\n",
|
||||
__func__, *gtf, rc);
|
||||
ata_dev_dbg(dev, "returning gtf=%p, gtf_count=%d\n",
|
||||
*gtf, rc);
|
||||
}
|
||||
return rc;
|
||||
|
||||
@ -650,9 +642,7 @@ static int ata_acpi_run_tf(struct ata_device *dev,
|
||||
struct ata_taskfile *pptf = NULL;
|
||||
struct ata_taskfile tf, ptf, rtf;
|
||||
unsigned int err_mask;
|
||||
const char *level;
|
||||
const char *descr;
|
||||
char msg[60];
|
||||
int rc;
|
||||
|
||||
if ((gtf->tf[0] == 0) && (gtf->tf[1] == 0) && (gtf->tf[2] == 0)
|
||||
@ -666,6 +656,8 @@ static int ata_acpi_run_tf(struct ata_device *dev,
|
||||
pptf = &ptf;
|
||||
}
|
||||
|
||||
descr = ata_get_cmd_name(tf.command);
|
||||
|
||||
if (!ata_acpi_filter_tf(dev, &tf, pptf)) {
|
||||
rtf = tf;
|
||||
err_mask = ata_exec_internal(dev, &rtf, NULL,
|
||||
@ -673,40 +665,42 @@ static int ata_acpi_run_tf(struct ata_device *dev,
|
||||
|
||||
switch (err_mask) {
|
||||
case 0:
|
||||
level = KERN_DEBUG;
|
||||
snprintf(msg, sizeof(msg), "succeeded");
|
||||
ata_dev_dbg(dev,
|
||||
"ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x"
|
||||
"(%s) succeeded\n",
|
||||
tf.command, tf.feature, tf.nsect, tf.lbal,
|
||||
tf.lbam, tf.lbah, tf.device, descr);
|
||||
rc = 1;
|
||||
break;
|
||||
|
||||
case AC_ERR_DEV:
|
||||
level = KERN_INFO;
|
||||
snprintf(msg, sizeof(msg),
|
||||
"rejected by device (Stat=0x%02x Err=0x%02x)",
|
||||
rtf.command, rtf.feature);
|
||||
ata_dev_info(dev,
|
||||
"ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x"
|
||||
"(%s) rejected by device (Stat=0x%02x Err=0x%02x)",
|
||||
tf.command, tf.feature, tf.nsect, tf.lbal,
|
||||
tf.lbam, tf.lbah, tf.device, descr,
|
||||
rtf.command, rtf.feature);
|
||||
rc = 0;
|
||||
break;
|
||||
|
||||
default:
|
||||
level = KERN_ERR;
|
||||
snprintf(msg, sizeof(msg),
|
||||
"failed (Emask=0x%x Stat=0x%02x Err=0x%02x)",
|
||||
err_mask, rtf.command, rtf.feature);
|
||||
ata_dev_err(dev,
|
||||
"ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x"
|
||||
"(%s) failed (Emask=0x%x Stat=0x%02x Err=0x%02x)",
|
||||
tf.command, tf.feature, tf.nsect, tf.lbal,
|
||||
tf.lbam, tf.lbah, tf.device, descr,
|
||||
err_mask, rtf.command, rtf.feature);
|
||||
rc = -EIO;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
level = KERN_INFO;
|
||||
snprintf(msg, sizeof(msg), "filtered out");
|
||||
ata_dev_info(dev,
|
||||
"ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x"
|
||||
"(%s) filtered out\n",
|
||||
tf.command, tf.feature, tf.nsect, tf.lbal,
|
||||
tf.lbam, tf.lbah, tf.device, descr);
|
||||
rc = 0;
|
||||
}
|
||||
descr = ata_get_cmd_descript(tf.command);
|
||||
|
||||
ata_dev_printk(dev, level,
|
||||
"ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x (%s) %s\n",
|
||||
tf.command, tf.feature, tf.nsect, tf.lbal,
|
||||
tf.lbam, tf.lbah, tf.device,
|
||||
(descr ? descr : "unknown"), msg);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -776,9 +770,8 @@ static int ata_acpi_push_id(struct ata_device *dev)
|
||||
struct acpi_object_list input;
|
||||
union acpi_object in_params[1];
|
||||
|
||||
if (ata_msg_probe(ap))
|
||||
ata_dev_dbg(dev, "%s: ix = %d, port#: %d\n",
|
||||
__func__, dev->devno, ap->port_no);
|
||||
ata_dev_dbg(dev, "%s: ix = %d, port#: %d\n",
|
||||
__func__, dev->devno, ap->port_no);
|
||||
|
||||
/* Give the drive Identify data to the drive via the _SDD method */
|
||||
/* _SDD: set up input parameters */
|
||||
|
@ -764,9 +764,6 @@ int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
|
||||
head = track % dev->heads;
|
||||
sect = (u32)block % dev->sectors + 1;
|
||||
|
||||
DPRINTK("block %u track %u cyl %u head %u sect %u\n",
|
||||
(u32)block, track, cyl, head, sect);
|
||||
|
||||
/* Check whether the converted CHS can fit.
|
||||
Cylinder: 0-65535
|
||||
Head: 0-15
|
||||
@ -1010,32 +1007,21 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
|
||||
* SEMB signature. This is worked around in
|
||||
* ata_dev_read_id().
|
||||
*/
|
||||
if ((tf->lbam == 0) && (tf->lbah == 0)) {
|
||||
DPRINTK("found ATA device by sig\n");
|
||||
if (tf->lbam == 0 && tf->lbah == 0)
|
||||
return ATA_DEV_ATA;
|
||||
}
|
||||
|
||||
if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
|
||||
DPRINTK("found ATAPI device by sig\n");
|
||||
if (tf->lbam == 0x14 && tf->lbah == 0xeb)
|
||||
return ATA_DEV_ATAPI;
|
||||
}
|
||||
|
||||
if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
|
||||
DPRINTK("found PMP device by sig\n");
|
||||
if (tf->lbam == 0x69 && tf->lbah == 0x96)
|
||||
return ATA_DEV_PMP;
|
||||
}
|
||||
|
||||
if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
|
||||
DPRINTK("found SEMB device by sig (could be ATA device)\n");
|
||||
if (tf->lbam == 0x3c && tf->lbah == 0xc3)
|
||||
return ATA_DEV_SEMB;
|
||||
}
|
||||
|
||||
if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
|
||||
DPRINTK("found ZAC device by sig\n");
|
||||
if (tf->lbam == 0xcd && tf->lbah == 0xab)
|
||||
return ATA_DEV_ZAC;
|
||||
}
|
||||
|
||||
DPRINTK("unknown device\n");
|
||||
return ATA_DEV_UNKNOWN;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ata_dev_classify);
|
||||
@ -1355,6 +1341,7 @@ static int ata_hpa_resize(struct ata_device *dev)
|
||||
|
||||
/**
|
||||
* ata_dump_id - IDENTIFY DEVICE info debugging output
|
||||
* @dev: device from which the information is fetched
|
||||
* @id: IDENTIFY DEVICE page to dump
|
||||
*
|
||||
* Dump selected 16-bit words from the given IDENTIFY DEVICE
|
||||
@ -1364,32 +1351,14 @@ static int ata_hpa_resize(struct ata_device *dev)
|
||||
* caller.
|
||||
*/
|
||||
|
||||
static inline void ata_dump_id(const u16 *id)
|
||||
static inline void ata_dump_id(struct ata_device *dev, const u16 *id)
|
||||
{
|
||||
DPRINTK("49==0x%04x "
|
||||
"53==0x%04x "
|
||||
"63==0x%04x "
|
||||
"64==0x%04x "
|
||||
"75==0x%04x \n",
|
||||
id[49],
|
||||
id[53],
|
||||
id[63],
|
||||
id[64],
|
||||
id[75]);
|
||||
DPRINTK("80==0x%04x "
|
||||
"81==0x%04x "
|
||||
"82==0x%04x "
|
||||
"83==0x%04x "
|
||||
"84==0x%04x \n",
|
||||
id[80],
|
||||
id[81],
|
||||
id[82],
|
||||
id[83],
|
||||
id[84]);
|
||||
DPRINTK("88==0x%04x "
|
||||
"93==0x%04x\n",
|
||||
id[88],
|
||||
id[93]);
|
||||
ata_dev_dbg(dev,
|
||||
"49==0x%04x 53==0x%04x 63==0x%04x 64==0x%04x 75==0x%04x\n"
|
||||
"80==0x%04x 81==0x%04x 82==0x%04x 83==0x%04x 84==0x%04x\n"
|
||||
"88==0x%04x 93==0x%04x\n",
|
||||
id[49], id[53], id[63], id[64], id[75], id[80],
|
||||
id[81], id[82], id[83], id[84], id[88], id[93]);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1602,9 +1571,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
|
||||
else
|
||||
ata_qc_complete(qc);
|
||||
|
||||
if (ata_msg_warn(ap))
|
||||
ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
|
||||
command);
|
||||
ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
|
||||
command);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(ap->lock, flags);
|
||||
@ -1754,7 +1722,7 @@ static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
|
||||
* this function is wrapped or replaced by the driver
|
||||
*/
|
||||
unsigned int ata_do_dev_read_id(struct ata_device *dev,
|
||||
struct ata_taskfile *tf, u16 *id)
|
||||
struct ata_taskfile *tf, __le16 *id)
|
||||
{
|
||||
return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
|
||||
id, sizeof(id[0]) * ATA_ID_WORDS, 0);
|
||||
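
Typing the IDENTIFY buffer as __le16 * makes explicit that the data arrives in
little-endian wire order; libata still byte-swaps it into CPU order on
big-endian hosts before the u16 words are interpreted. A reduced sketch of
that conversion, assuming the usual ATA_ID_WORDS-sized buffer (the helper name
is illustrative):

static void example_id_to_cpu(const __le16 *raw, u16 *id)
{
	unsigned int i;

	/* IDENTIFY DEVICE data is 256 little-endian 16-bit words. */
	for (i = 0; i < ATA_ID_WORDS; i++)
		id[i] = le16_to_cpu(raw[i]);
}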
@ -1794,9 +1762,6 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
|
||||
int may_fallback = 1, tried_spinup = 0;
|
||||
int rc;
|
||||
|
||||
if (ata_msg_ctl(ap))
|
||||
ata_dev_dbg(dev, "%s: ENTER\n", __func__);
|
||||
|
||||
retry:
|
||||
ata_tf_init(dev, &tf);
|
||||
|
||||
@ -1830,9 +1795,9 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
|
||||
tf.flags |= ATA_TFLAG_POLLING;
|
||||
|
||||
if (ap->ops->read_id)
|
||||
err_mask = ap->ops->read_id(dev, &tf, id);
|
||||
err_mask = ap->ops->read_id(dev, &tf, (__le16 *)id);
|
||||
else
|
||||
err_mask = ata_do_dev_read_id(dev, &tf, id);
|
||||
err_mask = ata_do_dev_read_id(dev, &tf, (__le16 *)id);
|
||||
|
||||
if (err_mask) {
|
||||
if (err_mask & AC_ERR_NODEV_HINT) {
|
||||
@ -1879,10 +1844,10 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
|
||||
}
|
||||
|
||||
if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
|
||||
ata_dev_dbg(dev, "dumping IDENTIFY data, "
|
||||
ata_dev_info(dev, "dumping IDENTIFY data, "
|
||||
"class=%d may_fallback=%d tried_spinup=%d\n",
|
||||
class, may_fallback, tried_spinup);
|
||||
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
|
||||
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET,
|
||||
16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
|
||||
}
|
||||
|
||||
@ -1966,9 +1931,8 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
if (ata_msg_warn(ap))
|
||||
ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
|
||||
reason, err_mask);
|
||||
ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
|
||||
reason, err_mask);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -1996,7 +1960,7 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
|
||||
unsigned int err_mask;
|
||||
bool dma = false;
|
||||
|
||||
DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
|
||||
ata_dev_dbg(dev, "read log page - log 0x%x, page 0x%x\n", log, page);
|
||||
|
||||
/*
|
||||
* Return error without actually issuing the command on controllers
|
||||
@ -2043,6 +2007,9 @@ static bool ata_log_supported(struct ata_device *dev, u8 log)
|
||||
{
|
||||
struct ata_port *ap = dev->link->ap;
|
||||
|
||||
if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
|
||||
return false;
|
||||
|
||||
if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
|
||||
return false;
|
||||
return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
|
||||
@ -2053,8 +2020,19 @@ static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
	struct ata_port *ap = dev->link->ap;
	unsigned int err, i;

	if (dev->horkage & ATA_HORKAGE_NO_ID_DEV_LOG)
		return false;

	if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
		ata_dev_warn(dev, "ATA Identify Device Log not supported\n");
		/*
		 * IDENTIFY DEVICE data log is defined as mandatory starting
		 * with ACS-3 (ATA version 10). Warn about the missing log
		 * for drives which implement this ATA level or above.
		 */
		if (ata_id_major_version(dev->id) >= 10)
			ata_dev_warn(dev,
				"ATA Identify Device Log not supported\n");
		dev->horkage |= ATA_HORKAGE_NO_ID_DEV_LOG;
		return false;
	}

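The >= 10 test above keys the warning to ACS-3, using the major version derived from the IDENTIFY data. A standalone sketch of how that version can be derived from word 80, assuming it mirrors what ata_id_major_version() does (the real helper lives in include/linux/ata.h):

#include <stdint.h>

/* Word 80 is a bit field of supported major versions; the highest set
 * bit gives the ATA/ACS level. 0x0000 and 0xffff mean "not reported". */
static int id_major_version(const uint16_t *id)
{
	uint16_t ver = id[80];
	int mver;

	if (ver == 0xffff)
		return 0;
	for (mver = 14; mver >= 1; mver--)
		if (ver & (1 << mver))
			break;
	return mver;
}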
@ -2379,7 +2357,6 @@ static void ata_dev_config_trusted(struct ata_device *dev)
|
||||
|
||||
static int ata_dev_config_lba(struct ata_device *dev)
|
||||
{
|
||||
struct ata_port *ap = dev->link->ap;
|
||||
const u16 *id = dev->id;
|
||||
const char *lba_desc;
|
||||
char ncq_desc[24];
|
||||
@ -2401,7 +2378,7 @@ static int ata_dev_config_lba(struct ata_device *dev)
|
||||
ret = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
|
||||
|
||||
/* print device info to dmesg */
|
||||
if (ata_msg_drv(ap) && ata_dev_print_info(dev))
|
||||
if (ata_dev_print_info(dev))
|
||||
ata_dev_info(dev,
|
||||
"%llu sectors, multi %u: %s %s\n",
|
||||
(unsigned long long)dev->n_sectors,
|
||||
@ -2412,7 +2389,6 @@ static int ata_dev_config_lba(struct ata_device *dev)
|
||||
|
||||
static void ata_dev_config_chs(struct ata_device *dev)
|
||||
{
|
||||
struct ata_port *ap = dev->link->ap;
|
||||
const u16 *id = dev->id;
|
||||
|
||||
if (ata_id_current_chs_valid(id)) {
|
||||
@ -2428,7 +2404,7 @@ static void ata_dev_config_chs(struct ata_device *dev)
|
||||
}
|
||||
|
||||
/* print device info to dmesg */
|
||||
if (ata_msg_drv(ap) && ata_dev_print_info(dev))
|
||||
if (ata_dev_print_info(dev))
|
||||
ata_dev_info(dev,
|
||||
"%llu sectors, multi %u, CHS %u/%u/%u\n",
|
||||
(unsigned long long)dev->n_sectors,
|
||||
@ -2464,18 +2440,68 @@ static void ata_dev_config_devslp(struct ata_device *dev)
	}
}

static void ata_dev_config_cpr(struct ata_device *dev)
{
	unsigned int err_mask;
	size_t buf_len;
	int i, nr_cpr = 0;
	struct ata_cpr_log *cpr_log = NULL;
	u8 *desc, *buf = NULL;

	if (ata_id_major_version(dev->id) < 11 ||
	    !ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES))
		goto out;

	/*
	 * Read the concurrent positioning ranges log (0x47). We can have at
	 * most 255 32B range descriptors plus a 64B header.
	 */
	buf_len = (64 + 255 * 32 + 511) & ~511;
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		goto out;

	err_mask = ata_read_log_page(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES,
				     0, buf, buf_len >> 9);
	if (err_mask)
		goto out;

	nr_cpr = buf[0];
	if (!nr_cpr)
		goto out;

	cpr_log = kzalloc(struct_size(cpr_log, cpr, nr_cpr), GFP_KERNEL);
	if (!cpr_log)
		goto out;

	cpr_log->nr_cpr = nr_cpr;
	desc = &buf[64];
	for (i = 0; i < nr_cpr; i++, desc += 32) {
		cpr_log->cpr[i].num = desc[0];
		cpr_log->cpr[i].num_storage_elements = desc[1];
		cpr_log->cpr[i].start_lba = get_unaligned_le64(&desc[8]);
		cpr_log->cpr[i].num_lbas = get_unaligned_le64(&desc[16]);
	}

out:
	swap(dev->cpr_log, cpr_log);
	kfree(cpr_log);
	kfree(buf);
}

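The loop above fixes the log layout: a 64-byte header whose first byte is the range count, followed by 32-byte descriptors carrying the start LBA and LBA count as little-endian 64-bit values at offsets 8 and 16. A standalone sketch of decoding such a buffer outside the kernel (hypothetical types, same layout):

#include <stdint.h>
#include <stddef.h>

struct cpr_range {
	uint8_t  num;
	uint8_t  num_storage_elements;
	uint64_t start_lba;
	uint64_t num_lbas;
};

static uint64_t get_le64(const uint8_t *p)
{
	uint64_t v = 0;
	for (int i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

/* Decode at most max_ranges descriptors; returns how many were filled. */
static size_t decode_cpr_log(const uint8_t *buf, size_t buf_len,
			     struct cpr_range *out, size_t max_ranges)
{
	size_t nr, i;

	if (buf_len < 64)
		return 0;
	nr = buf[0];
	if (nr > max_ranges)
		nr = max_ranges;
	for (i = 0; i < nr; i++) {
		const uint8_t *desc = buf + 64 + i * 32;

		if (64 + (i + 1) * 32 > buf_len)
			return i;
		out[i].num = desc[0];
		out[i].num_storage_elements = desc[1];
		out[i].start_lba = get_le64(desc + 8);
		out[i].num_lbas = get_le64(desc + 16);
	}
	return nr;
}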
static void ata_dev_print_features(struct ata_device *dev)
{
	if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
		return;

	ata_dev_info(dev,
		     "Features:%s%s%s%s%s\n",
		     "Features:%s%s%s%s%s%s\n",
		     dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
		     dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
		     dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
		     dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
		     dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "");
		     dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "",
		     dev->cpr_log ? " CPR" : "");
}

/**
|
||||
@ -2503,14 +2529,11 @@ int ata_dev_configure(struct ata_device *dev)
|
||||
char modelbuf[ATA_ID_PROD_LEN+1];
|
||||
int rc;
|
||||
|
||||
if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
|
||||
ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
|
||||
if (!ata_dev_enabled(dev)) {
|
||||
ata_dev_dbg(dev, "no device\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (ata_msg_probe(ap))
|
||||
ata_dev_dbg(dev, "%s: ENTER\n", __func__);
|
||||
|
||||
/* set horkage */
|
||||
dev->horkage |= ata_dev_blacklisted(dev);
|
||||
ata_force_horkage(dev);
|
||||
@ -2558,13 +2581,12 @@ int ata_dev_configure(struct ata_device *dev)
|
||||
return rc;
|
||||
|
||||
/* print device capabilities */
|
||||
if (ata_msg_probe(ap))
|
||||
ata_dev_dbg(dev,
|
||||
"%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
|
||||
"85:%04x 86:%04x 87:%04x 88:%04x\n",
|
||||
__func__,
|
||||
id[49], id[82], id[83], id[84],
|
||||
id[85], id[86], id[87], id[88]);
|
||||
ata_dev_dbg(dev,
|
||||
"%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
|
||||
"85:%04x 86:%04x 87:%04x 88:%04x\n",
|
||||
__func__,
|
||||
id[49], id[82], id[83], id[84],
|
||||
id[85], id[86], id[87], id[88]);
|
||||
|
||||
/* initialize to-be-configured parameters */
|
||||
dev->flags &= ~ATA_DFLAG_CFG_MASK;
|
||||
@ -2583,8 +2605,7 @@ int ata_dev_configure(struct ata_device *dev)
|
||||
/* find max transfer mode; for printk only */
|
||||
xfer_mask = ata_id_xfermask(id);
|
||||
|
||||
if (ata_msg_probe(ap))
|
||||
ata_dump_id(id);
|
||||
ata_dump_id(dev, id);
|
||||
|
||||
/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
|
||||
ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
|
||||
@ -2622,7 +2643,7 @@ int ata_dev_configure(struct ata_device *dev)
|
||||
}
|
||||
|
||||
/* print device info to dmesg */
|
||||
if (ata_msg_drv(ap) && print_info)
|
||||
if (print_info)
|
||||
ata_dev_info(dev, "%s: %s, %s, max %s\n",
|
||||
revbuf, modelbuf, fwrevbuf,
|
||||
ata_mode_string(xfer_mask));
|
||||
@ -2639,9 +2660,10 @@ int ata_dev_configure(struct ata_device *dev)
|
||||
ata_dev_config_sense_reporting(dev);
|
||||
ata_dev_config_zac(dev);
|
||||
ata_dev_config_trusted(dev);
|
||||
ata_dev_config_cpr(dev);
|
||||
dev->cdb_len = 32;
|
||||
|
||||
if (ata_msg_drv(ap) && print_info)
|
||||
if (print_info)
|
||||
ata_dev_print_features(dev);
|
||||
}
|
||||
|
||||
@ -2654,8 +2676,7 @@ int ata_dev_configure(struct ata_device *dev)
|
||||
|
||||
rc = atapi_cdb_len(id);
|
||||
if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
|
||||
if (ata_msg_warn(ap))
|
||||
ata_dev_warn(dev, "unsupported CDB len\n");
|
||||
ata_dev_warn(dev, "unsupported CDB len %d\n", rc);
|
||||
rc = -EINVAL;
|
||||
goto err_out_nosup;
|
||||
}
|
||||
@ -2699,7 +2720,7 @@ int ata_dev_configure(struct ata_device *dev)
|
||||
}
|
||||
|
||||
/* print device info to dmesg */
|
||||
if (ata_msg_drv(ap) && print_info)
|
||||
if (print_info)
|
||||
ata_dev_info(dev,
|
||||
"ATAPI: %s, %s, max %s%s%s%s\n",
|
||||
modelbuf, fwrevbuf,
|
||||
@ -2716,7 +2737,7 @@ int ata_dev_configure(struct ata_device *dev)
|
||||
/* Limit PATA drive on SATA cable bridge transfers to udma5,
|
||||
200 sectors */
|
||||
if (ata_dev_knobble(dev)) {
|
||||
if (ata_msg_drv(ap) && print_info)
|
||||
if (print_info)
|
||||
ata_dev_info(dev, "applying bridge limits\n");
|
||||
dev->udma_mask &= ATA_UDMA5;
|
||||
dev->max_sectors = ATA_MAX_SECTORS;
|
||||
@ -2765,8 +2786,6 @@ int ata_dev_configure(struct ata_device *dev)
|
||||
return 0;
|
||||
|
||||
err_out_nosup:
|
||||
if (ata_msg_probe(ap))
|
||||
ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -3309,8 +3328,8 @@ static int ata_dev_set_mode(struct ata_device *dev)
|
||||
dev_err_whine = " (device error ignored)";
|
||||
}
|
||||
|
||||
DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
|
||||
dev->xfer_shift, (int)dev->xfer_mode);
|
||||
ata_dev_dbg(dev, "xfer_shift=%u, xfer_mode=0x%x\n",
|
||||
dev->xfer_shift, (int)dev->xfer_mode);
|
||||
|
||||
if (!(ehc->i.flags & ATA_EHI_QUIET) ||
|
||||
ehc->i.flags & ATA_EHI_DID_HARDRESET)
|
||||
@ -3624,16 +3643,12 @@ void ata_std_postreset(struct ata_link *link, unsigned int *classes)
|
||||
{
|
||||
u32 serror;
|
||||
|
||||
DPRINTK("ENTER\n");
|
||||
|
||||
/* reset complete, clear SError */
|
||||
if (!sata_scr_read(link, SCR_ERROR, &serror))
|
||||
sata_scr_write(link, SCR_ERROR, serror);
|
||||
|
||||
/* print link status */
|
||||
sata_print_link_status(link);
|
||||
|
||||
DPRINTK("EXIT\n");
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ata_std_postreset);
|
||||
|
||||
@ -4060,6 +4075,13 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
	{ "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },

	/*
	 * This sata dom device goes on a walkabout when the ATA_LOG_DIRECTORY
	 * log page is accessed. Ensure we never ask for this log page with
	 * these devices.
	 */
	{ "SATADOM-ML 3ME", NULL, ATA_HORKAGE_NO_LOG_DIR },

	/* End Marker */
	{ }
};
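Entries in this table are glob patterns matched against the model string reported by the drive, so one line can cover a whole family of units. A standalone sketch of such a lookup, using fnmatch() in place of the kernel's glob_match() and accumulating the quirk bits of every match (an illustrative policy, not necessarily the libata one):

#include <fnmatch.h>
#include <stddef.h>

struct quirk {
	const char *model_pattern;
	unsigned int horkage;
};

static unsigned int lookup_horkage(const struct quirk *table, size_t n,
				   const char *model)
{
	unsigned int horkage = 0;
	size_t i;

	for (i = 0; i < n; i++)
		if (fnmatch(table[i].model_pattern, model, 0) == 0)
			horkage |= table[i].horkage;
	return horkage;
}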
@ -4261,7 +4283,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
|
||||
unsigned int err_mask;
|
||||
|
||||
/* set up set-features taskfile */
|
||||
DPRINTK("set features - xfer mode\n");
|
||||
ata_dev_dbg(dev, "set features - xfer mode\n");
|
||||
|
||||
/* Some controllers and ATAPI devices show flaky interrupt
|
||||
* behavior after setting xfer mode. Use polling instead.
|
||||
@ -4283,7 +4305,6 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
|
||||
/* On some disks, this command causes spin-up, so we need longer timeout */
|
||||
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
|
||||
|
||||
DPRINTK("EXIT, err_mask=%x\n", err_mask);
|
||||
return err_mask;
|
||||
}
|
||||
|
||||
@ -4309,7 +4330,7 @@ unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
|
||||
unsigned long timeout = 0;
|
||||
|
||||
/* set up set-features taskfile */
|
||||
DPRINTK("set features - SATA features\n");
|
||||
ata_dev_dbg(dev, "set features - SATA features\n");
|
||||
|
||||
ata_tf_init(dev, &tf);
|
||||
tf.command = ATA_CMD_SET_FEATURES;
|
||||
@ -4323,7 +4344,6 @@ unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
|
||||
ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
|
||||
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
|
||||
|
||||
DPRINTK("EXIT, err_mask=%x\n", err_mask);
|
||||
return err_mask;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ata_dev_set_feature);
|
||||
@ -4351,7 +4371,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
|
||||
return AC_ERR_INVALID;
|
||||
|
||||
/* set up init dev params taskfile */
|
||||
DPRINTK("init dev params \n");
|
||||
ata_dev_dbg(dev, "init dev params \n");
|
||||
|
||||
ata_tf_init(dev, &tf);
|
||||
tf.command = ATA_CMD_INIT_DEV_PARAMS;
|
||||
@ -4367,7 +4387,6 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
|
||||
if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
|
||||
err_mask = 0;
|
||||
|
||||
DPRINTK("EXIT, err_mask=%x\n", err_mask);
|
||||
return err_mask;
|
||||
}
|
||||
|
||||
@ -4479,8 +4498,6 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
|
||||
|
||||
WARN_ON_ONCE(sg == NULL);
|
||||
|
||||
VPRINTK("unmapping %u sg elements\n", qc->n_elem);
|
||||
|
||||
if (qc->n_elem)
|
||||
dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
|
||||
|
||||
@ -4506,13 +4523,10 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
|
||||
struct ata_port *ap = qc->ap;
|
||||
unsigned int n_elem;
|
||||
|
||||
VPRINTK("ENTER, ata%u\n", ap->print_id);
|
||||
|
||||
n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
|
||||
if (n_elem < 1)
|
||||
return -1;
|
||||
|
||||
VPRINTK("%d sg elements mapped\n", n_elem);
|
||||
qc->orig_n_elem = qc->n_elem;
|
||||
qc->n_elem = n_elem;
|
||||
qc->flags |= ATA_QCFLAG_DMAMAP;
|
||||
@ -4867,6 +4881,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
|
||||
return;
|
||||
}
|
||||
|
||||
trace_ata_qc_prep(qc);
|
||||
qc->err_mask |= ap->ops->qc_prep(qc);
|
||||
if (unlikely(qc->err_mask))
|
||||
goto err;
|
||||
@ -5312,8 +5327,6 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
|
||||
{
|
||||
struct ata_port *ap;
|
||||
|
||||
DPRINTK("ENTER\n");
|
||||
|
||||
ap = kzalloc(sizeof(*ap), GFP_KERNEL);
|
||||
if (!ap)
|
||||
return NULL;
|
||||
@ -5325,15 +5338,6 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
|
||||
ap->host = host;
|
||||
ap->dev = host->dev;
|
||||
|
||||
#if defined(ATA_VERBOSE_DEBUG)
|
||||
/* turn on all debugging levels */
|
||||
ap->msg_enable = 0x00FF;
|
||||
#elif defined(ATA_DEBUG)
|
||||
ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
|
||||
#else
|
||||
ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
|
||||
#endif
|
||||
|
||||
mutex_init(&ap->scsi_scan_mutex);
|
||||
INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
|
||||
INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
|
||||
@ -5430,8 +5434,6 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
|
||||
int i;
|
||||
void *dr;
|
||||
|
||||
DPRINTK("ENTER\n");
|
||||
|
||||
/* alloc a container for our list of ATA ports (buses) */
|
||||
sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
|
||||
host = kzalloc(sz, GFP_KERNEL);
|
||||
@ -5721,9 +5723,7 @@ int ata_port_probe(struct ata_port *ap)
|
||||
__ata_port_probe(ap);
|
||||
ata_port_wait_eh(ap);
|
||||
} else {
|
||||
DPRINTK("ata%u: bus probe begin\n", ap->print_id);
|
||||
rc = ata_bus_probe(ap);
|
||||
DPRINTK("ata%u: bus probe end\n", ap->print_id);
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
@ -6490,69 +6490,14 @@ const struct ata_port_info ata_dummy_port_info = {
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
|
||||
|
||||
/*
|
||||
* Utility print functions
|
||||
*/
|
||||
void ata_port_printk(const struct ata_port *ap, const char *level,
|
||||
const char *fmt, ...)
|
||||
{
|
||||
struct va_format vaf;
|
||||
va_list args;
|
||||
|
||||
va_start(args, fmt);
|
||||
|
||||
vaf.fmt = fmt;
|
||||
vaf.va = &args;
|
||||
|
||||
printk("%sata%u: %pV", level, ap->print_id, &vaf);
|
||||
|
||||
va_end(args);
|
||||
}
|
||||
EXPORT_SYMBOL(ata_port_printk);
|
||||
|
||||
void ata_link_printk(const struct ata_link *link, const char *level,
|
||||
const char *fmt, ...)
|
||||
{
|
||||
struct va_format vaf;
|
||||
va_list args;
|
||||
|
||||
va_start(args, fmt);
|
||||
|
||||
vaf.fmt = fmt;
|
||||
vaf.va = &args;
|
||||
|
||||
if (sata_pmp_attached(link->ap) || link->ap->slave_link)
|
||||
printk("%sata%u.%02u: %pV",
|
||||
level, link->ap->print_id, link->pmp, &vaf);
|
||||
else
|
||||
printk("%sata%u: %pV",
|
||||
level, link->ap->print_id, &vaf);
|
||||
|
||||
va_end(args);
|
||||
}
|
||||
EXPORT_SYMBOL(ata_link_printk);
|
||||
|
||||
void ata_dev_printk(const struct ata_device *dev, const char *level,
		    const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	printk("%sata%u.%02u: %pV",
	       level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
	       &vaf);

	va_end(args);
}
EXPORT_SYMBOL(ata_dev_printk);

void ata_print_version(const struct device *dev, const char *version)
{
	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
EXPORT_SYMBOL(ata_print_version);

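ata_port_printk(), ata_link_printk() and ata_dev_printk() above all rely on the printk %pV extension so the caller's format string and arguments are expanded exactly once, inside a single printk call, with the ataN[.MM] prefix prepended. A minimal sketch of the same pattern for a hypothetical subsystem, including the kind of convenience macros callers would layer on top (the real libata macros live in include/linux/libata.h and may differ):

#include <linux/printk.h>

/* Hypothetical prefixing wrapper built on %pV. */
static void mydev_printk(unsigned int id, const char *level,
			 const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%smydev%u: %pV", level, id, &vaf);
	va_end(args);
}

#define mydev_warn(id, fmt, ...) \
	mydev_printk(id, KERN_WARNING, fmt, ##__VA_ARGS__)
#define mydev_err(id, fmt, ...) \
	mydev_printk(id, KERN_ERR, fmt, ##__VA_ARGS__)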
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_tf_load);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_exec_command);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_setup);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_start);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_status);
|
||||
|
@ -533,8 +533,6 @@ void ata_scsi_error(struct Scsi_Host *host)
|
||||
unsigned long flags;
|
||||
LIST_HEAD(eh_work_q);
|
||||
|
||||
DPRINTK("ENTER\n");
|
||||
|
||||
spin_lock_irqsave(host->host_lock, flags);
|
||||
list_splice_init(&host->eh_cmd_q, &eh_work_q);
|
||||
spin_unlock_irqrestore(host->host_lock, flags);
|
||||
@ -548,7 +546,6 @@ void ata_scsi_error(struct Scsi_Host *host)
|
||||
/* finish or retry handled scmd's and clean up */
|
||||
WARN_ON(!list_empty(&eh_work_q));
|
||||
|
||||
DPRINTK("EXIT\n");
|
||||
}
|
||||
|
||||
/**
|
||||
@ -940,7 +937,7 @@ void ata_std_sched_eh(struct ata_port *ap)
|
||||
ata_eh_set_pending(ap, 1);
|
||||
scsi_schedule_eh(ap->scsi_host);
|
||||
|
||||
DPRINTK("port EH scheduled\n");
|
||||
trace_ata_std_sched_eh(ap);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ata_std_sched_eh);
|
||||
|
||||
@ -1070,7 +1067,7 @@ static void __ata_port_freeze(struct ata_port *ap)
|
||||
|
||||
ap->pflags |= ATA_PFLAG_FROZEN;
|
||||
|
||||
DPRINTK("ata%u port frozen\n", ap->print_id);
|
||||
trace_ata_port_freeze(ap);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1147,7 +1144,7 @@ void ata_eh_thaw_port(struct ata_port *ap)
|
||||
|
||||
spin_unlock_irqrestore(ap->lock, flags);
|
||||
|
||||
DPRINTK("ata%u port thawed\n", ap->print_id);
|
||||
trace_ata_port_thaw(ap);
|
||||
}
|
||||
|
||||
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
|
||||
@ -1217,8 +1214,7 @@ void ata_dev_disable(struct ata_device *dev)
|
||||
if (!ata_dev_enabled(dev))
|
||||
return;
|
||||
|
||||
if (ata_msg_drv(dev->link->ap))
|
||||
ata_dev_warn(dev, "disabled\n");
|
||||
ata_dev_warn(dev, "disable device\n");
|
||||
ata_acpi_on_disable(dev);
|
||||
ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
|
||||
dev->class++;
|
||||
@ -1287,6 +1283,8 @@ void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
|
||||
struct ata_eh_context *ehc = &link->eh_context;
|
||||
unsigned long flags;
|
||||
|
||||
trace_ata_eh_about_to_do(link, dev ? dev->devno : 0, action);
|
||||
|
||||
spin_lock_irqsave(ap->lock, flags);
|
||||
|
||||
ata_eh_clear_action(link, dev, ehi, action);
|
||||
@ -1317,6 +1315,8 @@ void ata_eh_done(struct ata_link *link, struct ata_device *dev,
|
||||
{
|
||||
struct ata_eh_context *ehc = &link->eh_context;
|
||||
|
||||
trace_ata_eh_done(link, dev ? dev->devno : 0, action);
|
||||
|
||||
ata_eh_clear_action(link, dev, &ehc->i, action);
|
||||
}
|
||||
|
||||
@ -1421,8 +1421,6 @@ static void ata_eh_request_sense(struct ata_queued_cmd *qc,
|
||||
return;
|
||||
}
|
||||
|
||||
DPRINTK("ATA request sense\n");
|
||||
|
||||
ata_tf_init(dev, &tf);
|
||||
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
|
||||
tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
|
||||
@ -1463,8 +1461,6 @@ unsigned int atapi_eh_request_sense(struct ata_device *dev,
|
||||
struct ata_port *ap = dev->link->ap;
|
||||
struct ata_taskfile tf;
|
||||
|
||||
DPRINTK("ATAPI request sense\n");
|
||||
|
||||
memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
|
||||
|
||||
/* initialize sense_buf with the error register,
|
||||
@ -1928,8 +1924,6 @@ static void ata_eh_link_autopsy(struct ata_link *link)
|
||||
u32 serror;
|
||||
int rc;
|
||||
|
||||
DPRINTK("ENTER\n");
|
||||
|
||||
if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
|
||||
return;
|
||||
|
||||
@ -2036,7 +2030,6 @@ static void ata_eh_link_autopsy(struct ata_link *link)
|
||||
ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
|
||||
trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
|
||||
}
|
||||
DPRINTK("EXIT\n");
|
||||
}
|
||||
|
||||
/**
|
||||
@ -2086,16 +2079,15 @@ void ata_eh_autopsy(struct ata_port *ap)
|
||||
}
|
||||
|
||||
/**
|
||||
* ata_get_cmd_descript - get description for ATA command
|
||||
* @command: ATA command code to get description for
|
||||
* ata_get_cmd_name - get name for ATA command
|
||||
* @command: ATA command code to get name for
|
||||
*
|
||||
* Return a textual description of the given command, or NULL if the
|
||||
* command is not known.
|
||||
* Return a textual name of the given command or "unknown"
|
||||
*
|
||||
* LOCKING:
|
||||
* None
|
||||
*/
|
||||
const char *ata_get_cmd_descript(u8 command)
|
||||
const char *ata_get_cmd_name(u8 command)
|
||||
{
|
||||
#ifdef CONFIG_ATA_VERBOSE_ERROR
|
||||
static const struct
|
||||
@ -2203,9 +2195,9 @@ const char *ata_get_cmd_descript(u8 command)
|
||||
return cmd_descr[i].text;
|
||||
#endif
|
||||
|
||||
return NULL;
|
||||
return "unknown";
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ata_get_cmd_descript);
|
||||
EXPORT_SYMBOL_GPL(ata_get_cmd_name);
|
||||
|
||||
/**
|
||||
* ata_eh_link_report - report error handling to user
|
||||
@ -2354,12 +2346,9 @@ static void ata_eh_link_report(struct ata_link *link)
|
||||
}
|
||||
__scsi_format_command(cdb_buf, sizeof(cdb_buf),
|
||||
cdb, cdb_len);
|
||||
} else {
|
||||
const char *descr = ata_get_cmd_descript(cmd->command);
|
||||
if (descr)
|
||||
ata_dev_err(qc->dev, "failed command: %s\n",
|
||||
descr);
|
||||
}
|
||||
} else
|
||||
ata_dev_err(qc->dev, "failed command: %s\n",
|
||||
ata_get_cmd_name(cmd->command));
|
||||
|
||||
ata_dev_err(qc->dev,
|
||||
"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
|
||||
@ -2596,12 +2585,19 @@ int ata_eh_reset(struct ata_link *link, int classify,
|
||||
|
||||
/* mark that this EH session started with reset */
|
||||
ehc->last_reset = jiffies;
|
||||
if (reset == hardreset)
|
||||
if (reset == hardreset) {
|
||||
ehc->i.flags |= ATA_EHI_DID_HARDRESET;
|
||||
else
|
||||
trace_ata_link_hardreset_begin(link, classes, deadline);
|
||||
} else {
|
||||
ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
|
||||
trace_ata_link_softreset_begin(link, classes, deadline);
|
||||
}
|
||||
|
||||
rc = ata_do_reset(link, reset, classes, deadline, true);
|
||||
if (reset == hardreset)
|
||||
trace_ata_link_hardreset_end(link, classes, rc);
|
||||
else
|
||||
trace_ata_link_softreset_end(link, classes, rc);
|
||||
if (rc && rc != -EAGAIN) {
|
||||
failed_link = link;
|
||||
goto fail;
|
||||
@ -2615,8 +2611,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
|
||||
ata_link_info(slave, "hard resetting link\n");
|
||||
|
||||
ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
|
||||
trace_ata_slave_hardreset_begin(slave, classes,
|
||||
deadline);
|
||||
tmp = ata_do_reset(slave, reset, classes, deadline,
|
||||
false);
|
||||
trace_ata_slave_hardreset_end(slave, classes, tmp);
|
||||
switch (tmp) {
|
||||
case -EAGAIN:
|
||||
rc = -EAGAIN;
|
||||
@ -2644,7 +2643,9 @@ int ata_eh_reset(struct ata_link *link, int classify,
|
||||
}
|
||||
|
||||
ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
|
||||
trace_ata_link_softreset_begin(link, classes, deadline);
|
||||
rc = ata_do_reset(link, reset, classes, deadline, true);
|
||||
trace_ata_link_softreset_end(link, classes, rc);
|
||||
if (rc) {
|
||||
failed_link = link;
|
||||
goto fail;
|
||||
@ -2698,8 +2699,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
|
||||
*/
|
||||
if (postreset) {
|
||||
postreset(link, classes);
|
||||
if (slave)
|
||||
trace_ata_link_postreset(link, classes, rc);
|
||||
if (slave) {
|
||||
postreset(slave, classes);
|
||||
trace_ata_slave_postreset(slave, classes, rc);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2921,8 +2925,6 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
|
||||
unsigned long flags;
|
||||
int rc = 0;
|
||||
|
||||
DPRINTK("ENTER\n");
|
||||
|
||||
/* For PATA drive side cable detection to work, IDENTIFY must
|
||||
* be done backwards such that PDIAG- is released by the slave
|
||||
* device before the master device is identified.
|
||||
@ -3036,7 +3038,6 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
|
||||
|
||||
err:
|
||||
*r_failed_dev = dev;
|
||||
DPRINTK("EXIT rc=%d\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -3551,8 +3552,6 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
|
||||
int rc, nr_fails;
|
||||
unsigned long flags, deadline;
|
||||
|
||||
DPRINTK("ENTER\n");
|
||||
|
||||
/* prep for recovery */
|
||||
ata_for_each_link(link, ap, EDGE) {
|
||||
struct ata_eh_context *ehc = &link->eh_context;
|
||||
@ -3760,7 +3759,6 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
|
||||
if (rc && r_failed_link)
|
||||
*r_failed_link = link;
|
||||
|
||||
DPRINTK("EXIT, rc=%d\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
@ -652,8 +652,6 @@ static int sata_pmp_revalidate(struct ata_device *dev, unsigned int new_class)
|
||||
u32 *gscr = (void *)ap->sector_buf;
|
||||
int rc;
|
||||
|
||||
DPRINTK("ENTER\n");
|
||||
|
||||
ata_eh_about_to_do(link, NULL, ATA_EH_REVALIDATE);
|
||||
|
||||
if (!ata_dev_enabled(dev)) {
|
||||
@ -686,12 +684,10 @@ static int sata_pmp_revalidate(struct ata_device *dev, unsigned int new_class)
|
||||
|
||||
ata_eh_done(link, NULL, ATA_EH_REVALIDATE);
|
||||
|
||||
DPRINTK("EXIT, rc=0\n");
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
ata_dev_err(dev, "PMP revalidation failed (errno=%d)\n", rc);
|
||||
DPRINTK("EXIT, rc=%d\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -759,8 +755,6 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
|
||||
int detach = 0, rc = 0;
|
||||
int reval_failed = 0;
|
||||
|
||||
DPRINTK("ENTER\n");
|
||||
|
||||
if (dev->flags & ATA_DFLAG_DETACH) {
|
||||
detach = 1;
|
||||
rc = -ENODEV;
|
||||
@ -828,7 +822,6 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
|
||||
/* okay, PMP resurrected */
|
||||
ehc->i.flags = 0;
|
||||
|
||||
DPRINTK("EXIT, rc=0\n");
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
@ -838,7 +831,6 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
|
||||
else
|
||||
ata_dev_disable(dev);
|
||||
|
||||
DPRINTK("EXIT, rc=%d\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
@ -317,7 +317,7 @@ int sata_link_resume(struct ata_link *link, const unsigned long *params,
	 * immediately after resuming. Delay 200ms before
	 * debouncing.
	 */
	if (!(link->flags & ATA_LFLAG_NO_DB_DELAY))
	if (!(link->flags & ATA_LFLAG_NO_DEBOUNCE_DELAY))
		ata_msleep(link->ap, 200);

	/* is SControl restored correctly? */
@ -533,8 +533,6 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
|
||||
u32 scontrol;
|
||||
int rc;
|
||||
|
||||
DPRINTK("ENTER\n");
|
||||
|
||||
if (online)
|
||||
*online = false;
|
||||
|
||||
@ -610,7 +608,6 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
|
||||
*online = false;
|
||||
ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
|
||||
}
|
||||
DPRINTK("EXIT, rc=%d\n", rc);
|
||||
return rc;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(sata_link_hardreset);
|
||||
@ -827,7 +824,7 @@ static ssize_t ata_scsi_lpm_show(struct device *dev,
	if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
		return -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%s\n",
	return sysfs_emit(buf, "%s\n",
			ata_lpm_policy_names[ap->target_lpm_policy]);
}
DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
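The snprintf to sysfs_emit conversions in this hunk and the following ones are mechanical: sysfs_emit() knows the show() buffer is a full page, so the PAGE_SIZE bound no longer has to be repeated at every call site. A minimal sketch of a show() callback in that style (hypothetical attribute, not a libata one):

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t widget_mode_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", 1);
}
static DEVICE_ATTR_RO(widget_mode);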
@ -876,7 +873,7 @@ static ssize_t ata_ncq_prio_enable_show(struct device *device,
|
||||
ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE;
|
||||
spin_unlock_irq(ap->lock);
|
||||
|
||||
return rc ? rc : snprintf(buf, 20, "%u\n", ncq_prio_enable);
|
||||
return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_enable);
|
||||
}
|
||||
|
||||
static ssize_t ata_ncq_prio_enable_store(struct device *device,
|
||||
@ -922,13 +919,22 @@ DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
|
||||
ata_ncq_prio_enable_show, ata_ncq_prio_enable_store);
|
||||
EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable);
|
||||
|
||||
struct device_attribute *ata_ncq_sdev_attrs[] = {
|
||||
&dev_attr_unload_heads,
|
||||
&dev_attr_ncq_prio_enable,
|
||||
&dev_attr_ncq_prio_supported,
|
||||
static struct attribute *ata_ncq_sdev_attrs[] = {
|
||||
&dev_attr_unload_heads.attr,
|
||||
&dev_attr_ncq_prio_enable.attr,
|
||||
&dev_attr_ncq_prio_supported.attr,
|
||||
NULL
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(ata_ncq_sdev_attrs);
|
||||
|
||||
static const struct attribute_group ata_ncq_sdev_attr_group = {
|
||||
.attrs = ata_ncq_sdev_attrs
|
||||
};
|
||||
|
||||
const struct attribute_group *ata_ncq_sdev_groups[] = {
|
||||
&ata_ncq_sdev_attr_group,
|
||||
NULL
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(ata_ncq_sdev_groups);
|
||||
|
||||
static ssize_t
|
||||
ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
|
||||
@ -963,7 +969,7 @@ ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr,
|
||||
struct Scsi_Host *shost = class_to_shost(dev);
|
||||
struct ata_port *ap = ata_shost_to_port(shost);
|
||||
|
||||
return snprintf(buf, 23, "%d\n", ap->em_message_type);
|
||||
return sysfs_emit(buf, "%d\n", ap->em_message_type);
|
||||
}
|
||||
DEVICE_ATTR(em_message_type, S_IRUGO,
|
||||
ata_scsi_em_message_type_show, NULL);
|
||||
@ -1252,13 +1258,11 @@ int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
ata_scsi_dump_cdb(ap, cmd);
|
||||
|
||||
if (likely(ata_dev_enabled(ap->link.device)))
|
||||
rc = __ata_scsi_queuecmd(cmd, ap->link.device);
|
||||
else {
|
||||
cmd->result = (DID_BAD_TARGET << 16);
|
||||
cmd->scsi_done(cmd);
|
||||
scsi_done(cmd);
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
@ -121,7 +121,7 @@ static ssize_t ata_scsi_park_show(struct device *device,
|
||||
unlock:
|
||||
spin_unlock_irq(ap->lock);
|
||||
|
||||
return rc ? rc : snprintf(buf, 20, "%u\n", msecs);
|
||||
return rc ? rc : sysfs_emit(buf, "%u\n", msecs);
|
||||
}
|
||||
|
||||
static ssize_t ata_scsi_park_store(struct device *device,
|
||||
@ -234,11 +234,20 @@ static void ata_scsi_set_invalid_parameter(struct ata_device *dev,
|
||||
field, 0xff, 0);
|
||||
}
|
||||
|
||||
struct device_attribute *ata_common_sdev_attrs[] = {
|
||||
&dev_attr_unload_heads,
|
||||
static struct attribute *ata_common_sdev_attrs[] = {
|
||||
&dev_attr_unload_heads.attr,
|
||||
NULL
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
|
||||
|
||||
static const struct attribute_group ata_common_sdev_attr_group = {
|
||||
.attrs = ata_common_sdev_attrs
|
||||
};
|
||||
|
||||
const struct attribute_group *ata_common_sdev_groups[] = {
|
||||
&ata_common_sdev_attr_group,
|
||||
NULL
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(ata_common_sdev_groups);
|
||||
|
||||
/**
|
||||
* ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
|
||||
@ -634,7 +643,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
|
||||
qc = ata_qc_new_init(dev, scsi_cmd_to_rq(cmd)->tag);
|
||||
if (qc) {
|
||||
qc->scsicmd = cmd;
|
||||
qc->scsidone = cmd->scsi_done;
|
||||
qc->scsidone = scsi_done;
|
||||
|
||||
qc->sg = scsi_sglist(cmd);
|
||||
qc->n_elem = scsi_sg_count(cmd);
|
||||
@ -643,7 +652,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
|
||||
qc->flags |= ATA_QCFLAG_QUIET;
|
||||
} else {
|
||||
cmd->result = (DID_OK << 16) | SAM_STAT_TASK_SET_FULL;
|
||||
cmd->scsi_done(cmd);
|
||||
scsi_done(cmd);
|
||||
}
|
||||
|
||||
return qc;
|
||||
@ -659,7 +668,7 @@ static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
|
||||
|
||||
/**
|
||||
* ata_dump_status - user friendly display of error info
|
||||
* @id: id of the port in question
|
||||
* @ap: the port in question
|
||||
* @tf: ptr to filled out taskfile
|
||||
*
|
||||
* Decode and dump the ATA error/status registers for the user so
|
||||
@ -669,37 +678,32 @@ static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
|
||||
* LOCKING:
|
||||
* inherited from caller
|
||||
*/
|
||||
static void ata_dump_status(unsigned id, struct ata_taskfile *tf)
|
||||
static void ata_dump_status(struct ata_port *ap, struct ata_taskfile *tf)
|
||||
{
|
||||
u8 stat = tf->command, err = tf->feature;
|
||||
|
||||
pr_warn("ata%u: status=0x%02x { ", id, stat);
|
||||
if (stat & ATA_BUSY) {
|
||||
pr_cont("Busy }\n"); /* Data is not valid in this case */
|
||||
ata_port_warn(ap, "status=0x%02x {Busy} ", stat);
|
||||
} else {
|
||||
if (stat & ATA_DRDY) pr_cont("DriveReady ");
|
||||
if (stat & ATA_DF) pr_cont("DeviceFault ");
|
||||
if (stat & ATA_DSC) pr_cont("SeekComplete ");
|
||||
if (stat & ATA_DRQ) pr_cont("DataRequest ");
|
||||
if (stat & ATA_CORR) pr_cont("CorrectedError ");
|
||||
if (stat & ATA_SENSE) pr_cont("Sense ");
|
||||
if (stat & ATA_ERR) pr_cont("Error ");
|
||||
pr_cont("}\n");
|
||||
|
||||
if (err) {
|
||||
pr_warn("ata%u: error=0x%02x { ", id, err);
|
||||
if (err & ATA_ABORTED) pr_cont("DriveStatusError ");
|
||||
if (err & ATA_ICRC) {
|
||||
if (err & ATA_ABORTED)
|
||||
pr_cont("BadCRC ");
|
||||
else pr_cont("Sector ");
|
||||
}
|
||||
if (err & ATA_UNC) pr_cont("UncorrectableError ");
|
||||
if (err & ATA_IDNF) pr_cont("SectorIdNotFound ");
|
||||
if (err & ATA_TRK0NF) pr_cont("TrackZeroNotFound ");
|
||||
if (err & ATA_AMNF) pr_cont("AddrMarkNotFound ");
|
||||
pr_cont("}\n");
|
||||
}
|
||||
ata_port_warn(ap, "status=0x%02x { %s%s%s%s%s%s%s} ", stat,
|
||||
stat & ATA_DRDY ? "DriveReady " : "",
|
||||
stat & ATA_DF ? "DeviceFault " : "",
|
||||
stat & ATA_DSC ? "SeekComplete " : "",
|
||||
stat & ATA_DRQ ? "DataRequest " : "",
|
||||
stat & ATA_CORR ? "CorrectedError " : "",
|
||||
stat & ATA_SENSE ? "Sense " : "",
|
||||
stat & ATA_ERR ? "Error " : "");
|
||||
if (err)
|
||||
ata_port_warn(ap, "error=0x%02x {%s%s%s%s%s%s", err,
|
||||
err & ATA_ABORTED ?
|
||||
"DriveStatusError " : "",
|
||||
err & ATA_ICRC ?
|
||||
(err & ATA_ABORTED ?
|
||||
"BadCRC " : "Sector ") : "",
|
||||
err & ATA_UNC ? "UncorrectableError " : "",
|
||||
err & ATA_IDNF ? "SectorIdNotFound " : "",
|
||||
err & ATA_TRK0NF ? "TrackZeroNotFound " : "",
|
||||
err & ATA_AMNF ? "AddrMarkNotFound " : "");
|
||||
	}
}

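The rewrite above collapses the pr_cont() chain into two ata_port_warn() calls so each line is emitted atomically with the port prefix. A standalone sketch of another way to decode such a status byte, driving the strings from a table instead of chained conditionals (bit values are illustrative, not the ATA register definitions from <linux/ata.h>):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static const struct {
	uint8_t bit;
	const char *name;
} status_bits[] = {
	{ 0x40, "DriveReady" },
	{ 0x20, "DeviceFault" },
	{ 0x10, "SeekComplete" },
	{ 0x08, "DataRequest" },
	{ 0x01, "Error" },
};

static void dump_status(uint8_t stat)
{
	size_t i;

	printf("status=0x%02x {", stat);
	for (i = 0; i < sizeof(status_bits) / sizeof(status_bits[0]); i++)
		if (stat & status_bits[i].bit)
			printf(" %s", status_bits[i].name);
	printf(" }\n");
}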
@ -1290,8 +1294,6 @@ static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
|
||||
u64 lba = 0;
|
||||
u32 len;
|
||||
|
||||
VPRINTK("six-byte command\n");
|
||||
|
||||
lba |= ((u64)(cdb[1] & 0x1f)) << 16;
|
||||
lba |= ((u64)cdb[2]) << 8;
|
||||
lba |= ((u64)cdb[3]);
|
||||
@ -1317,8 +1319,6 @@ static void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
|
||||
u64 lba = 0;
|
||||
u32 len = 0;
|
||||
|
||||
VPRINTK("ten-byte command\n");
|
||||
|
||||
lba |= ((u64)cdb[2]) << 24;
|
||||
lba |= ((u64)cdb[3]) << 16;
|
||||
lba |= ((u64)cdb[4]) << 8;
|
||||
@ -1346,8 +1346,6 @@ static void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
|
||||
u64 lba = 0;
|
||||
u32 len = 0;
|
||||
|
||||
VPRINTK("sixteen-byte command\n");
|
||||
|
||||
lba |= ((u64)cdb[2]) << 56;
|
||||
lba |= ((u64)cdb[3]) << 48;
|
||||
lba |= ((u64)cdb[4]) << 40;
|
||||
@ -1460,9 +1458,6 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
|
||||
head = track % dev->heads;
|
||||
sect = (u32)block % dev->sectors + 1;
|
||||
|
||||
DPRINTK("block %u track %u cyl %u head %u sect %u\n",
|
||||
(u32)block, track, cyl, head, sect);
|
||||
|
||||
/* Check whether the converted CHS can fit.
|
||||
Cylinder: 0-65535
|
||||
Head: 0-15
|
||||
@ -1585,7 +1580,6 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
|
||||
goto invalid_fld;
|
||||
break;
|
||||
default:
|
||||
DPRINTK("no-byte command\n");
|
||||
fp = 0;
|
||||
goto invalid_fld;
|
||||
}
|
||||
@ -1663,7 +1657,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
|
||||
cmd->result = SAM_STAT_GOOD;
|
||||
|
||||
if (need_sense && !ap->ops->error_handler)
|
||||
ata_dump_status(ap->print_id, &qc->result_tf);
|
||||
ata_dump_status(ap, &qc->result_tf);
|
||||
|
||||
ata_qc_done(qc);
|
||||
}
|
||||
@ -1701,8 +1695,6 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
|
||||
struct ata_queued_cmd *qc;
|
||||
int rc;
|
||||
|
||||
VPRINTK("ENTER\n");
|
||||
|
||||
qc = ata_scsi_qc_new(dev, cmd);
|
||||
if (!qc)
|
||||
goto err_mem;
|
||||
@ -1733,26 +1725,22 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
|
||||
/* select device, send command to hardware */
|
||||
ata_qc_issue(qc);
|
||||
|
||||
VPRINTK("EXIT\n");
|
||||
return 0;
|
||||
|
||||
early_finish:
|
||||
ata_qc_free(qc);
|
||||
cmd->scsi_done(cmd);
|
||||
DPRINTK("EXIT - early finish (good or error)\n");
|
||||
scsi_done(cmd);
|
||||
return 0;
|
||||
|
||||
err_did:
|
||||
ata_qc_free(qc);
|
||||
cmd->result = (DID_ERROR << 16);
|
||||
cmd->scsi_done(cmd);
|
||||
scsi_done(cmd);
|
||||
err_mem:
|
||||
DPRINTK("EXIT - internal\n");
|
||||
return 0;
|
||||
|
||||
defer:
|
||||
ata_qc_free(qc);
|
||||
DPRINTK("EXIT - defer\n");
|
||||
if (rc == ATA_DEFER_LINK)
|
||||
return SCSI_MLQUEUE_DEVICE_BUSY;
|
||||
else
|
||||
@ -1849,8 +1837,6 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
|
||||
2
|
||||
};
|
||||
|
||||
VPRINTK("ENTER\n");
|
||||
|
||||
/* set scsi removable (RMB) bit per ata bit, or if the
|
||||
* AHCI port says it's external (Hotplug-capable, eSATA).
|
||||
*/
|
||||
@ -1895,7 +1881,7 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
|
||||
*/
|
||||
static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
|
||||
{
|
||||
int num_pages;
|
||||
int i, num_pages = 0;
|
||||
static const u8 pages[] = {
|
||||
0x00, /* page 0x00, this page */
|
||||
0x80, /* page 0x80, unit serial no page */
|
||||
@ -1905,13 +1891,17 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
|
||||
0xb1, /* page 0xb1, block device characteristics page */
|
||||
0xb2, /* page 0xb2, thin provisioning page */
|
||||
0xb6, /* page 0xb6, zoned block device characteristics */
|
||||
0xb9, /* page 0xb9, concurrent positioning ranges */
|
||||
};
|
||||
|
||||
num_pages = sizeof(pages);
|
||||
if (!(args->dev->flags & ATA_DFLAG_ZAC))
|
||||
num_pages--;
|
||||
for (i = 0; i < sizeof(pages); i++) {
|
||||
if (pages[i] == 0xb6 &&
|
||||
!(args->dev->flags & ATA_DFLAG_ZAC))
|
||||
continue;
|
||||
rbuf[num_pages + 4] = pages[i];
|
||||
num_pages++;
|
||||
}
|
||||
rbuf[3] = num_pages; /* number of supported VPD pages */
|
||||
memcpy(rbuf + 4, pages, num_pages);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -2121,6 +2111,26 @@ static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
|
||||
{
|
||||
struct ata_cpr_log *cpr_log = args->dev->cpr_log;
|
||||
u8 *desc = &rbuf[64];
|
||||
int i;
|
||||
|
||||
/* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */
|
||||
rbuf[1] = 0xb9;
|
||||
put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[3]);
|
||||
|
||||
for (i = 0; i < cpr_log->nr_cpr; i++, desc += 32) {
|
||||
desc[0] = cpr_log->cpr[i].num;
|
||||
desc[1] = cpr_log->cpr[i].num_storage_elements;
|
||||
put_unaligned_be64(cpr_log->cpr[i].start_lba, &desc[8]);
|
||||
put_unaligned_be64(cpr_log->cpr[i].num_lbas, &desc[16]);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* modecpy - Prepare response for MODE SENSE
|
||||
* @dest: output buffer
|
||||
@ -2261,8 +2271,6 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
|
||||
u8 dpofua, bp = 0xff;
|
||||
u16 fp;
|
||||
|
||||
VPRINTK("ENTER\n");
|
||||
|
||||
six_byte = (scsicmd[0] == MODE_SENSE);
|
||||
ebd = !(scsicmd[1] & 0x8); /* dbd bit inverted == edb */
|
||||
/*
|
||||
@ -2380,8 +2388,6 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
|
||||
log2_per_phys = ata_id_log2_per_physical_sector(dev->id);
|
||||
lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys);
|
||||
|
||||
VPRINTK("ENTER\n");
|
||||
|
||||
if (args->cmd->cmnd[0] == READ_CAPACITY) {
|
||||
if (last_lba >= 0xffffffffULL)
|
||||
last_lba = 0xffffffff;
|
||||
@ -2448,7 +2454,6 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
|
||||
*/
|
||||
static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
|
||||
{
|
||||
VPRINTK("ENTER\n");
|
||||
rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */
|
||||
|
||||
return 0;
|
||||
@ -2479,8 +2484,6 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
|
||||
struct ata_port *ap = qc->ap;
|
||||
struct scsi_cmnd *cmd = qc->scsicmd;
|
||||
|
||||
DPRINTK("ATAPI request sense\n");
|
||||
|
||||
memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
|
||||
|
||||
#ifdef CONFIG_ATA_SFF
|
||||
@ -2519,8 +2522,6 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
|
||||
qc->complete_fn = atapi_sense_complete;
|
||||
|
||||
ata_qc_issue(qc);
|
||||
|
||||
DPRINTK("EXIT\n");
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2548,8 +2549,6 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
|
||||
struct scsi_cmnd *cmd = qc->scsicmd;
|
||||
unsigned int err_mask = qc->err_mask;
|
||||
|
||||
VPRINTK("ENTER, err_mask 0x%X\n", err_mask);
|
||||
|
||||
/* handle completion from new EH */
|
||||
if (unlikely(qc->ap->ops->error_handler &&
|
||||
(err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {
|
||||
@ -2630,7 +2629,6 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
|
||||
qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
|
||||
if (scmd->sc_data_direction == DMA_TO_DEVICE) {
|
||||
qc->tf.flags |= ATA_TFLAG_WRITE;
|
||||
DPRINTK("direction: write\n");
|
||||
}
|
||||
|
||||
qc->tf.command = ATA_CMD_PACKET;
|
||||
@ -2992,7 +2990,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
|
||||
ata_qc_set_pc_nbytes(qc);
|
||||
|
||||
/* We may not issue DMA commands if no DMA mode is set */
|
||||
if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0) {
|
||||
if (tf->protocol == ATA_PROT_DMA && !ata_dma_enabled(dev)) {
|
||||
fp = 1;
|
||||
goto invalid_fld;
|
||||
}
|
||||
@ -3142,7 +3140,7 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
|
||||
u8 unmap = cdb[1] & 0x8;
|
||||
|
||||
/* we may not issue DMA commands if no DMA mode is set */
|
||||
if (unlikely(!dev->dma_mode))
|
||||
if (unlikely(!ata_dma_enabled(dev)))
|
||||
goto invalid_opcode;
|
||||
|
||||
/*
|
||||
@ -3558,10 +3556,7 @@ static int ata_mselect_caching(struct ata_queued_cmd *qc,
|
||||
*/
|
||||
|
||||
if (len != CACHE_MPAGE_LEN - 2) {
|
||||
if (len < CACHE_MPAGE_LEN - 2)
|
||||
*fp = len;
|
||||
else
|
||||
*fp = CACHE_MPAGE_LEN - 2;
|
||||
*fp = min(len, CACHE_MPAGE_LEN - 2);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -3614,10 +3609,7 @@ static int ata_mselect_control(struct ata_queued_cmd *qc,
|
||||
*/
|
||||
|
||||
if (len != CONTROL_MPAGE_LEN - 2) {
|
||||
if (len < CONTROL_MPAGE_LEN - 2)
|
||||
*fp = len;
|
||||
else
|
||||
*fp = CONTROL_MPAGE_LEN - 2;
|
||||
*fp = min(len, CONTROL_MPAGE_LEN - 2);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -3665,8 +3657,6 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
|
||||
u8 buffer[64];
|
||||
const u8 *p = buffer;
|
||||
|
||||
VPRINTK("ENTER\n");
|
||||
|
||||
six_byte = (cdb[0] == MODE_SELECT);
|
||||
if (six_byte) {
|
||||
if (scmd->cmd_len < 5) {
|
||||
@ -3964,72 +3954,47 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* ata_scsi_dump_cdb - dump SCSI command contents to dmesg
|
||||
* @ap: ATA port to which the command was being sent
|
||||
* @cmd: SCSI command to dump
|
||||
*
|
||||
* Prints the contents of a SCSI command via printk().
|
||||
*/
|
||||
|
||||
void ata_scsi_dump_cdb(struct ata_port *ap, struct scsi_cmnd *cmd)
|
||||
{
|
||||
#ifdef ATA_VERBOSE_DEBUG
|
||||
struct scsi_device *scsidev = cmd->device;
|
||||
|
||||
VPRINTK("CDB (%u:%d,%d,%lld) %9ph\n",
|
||||
ap->print_id,
|
||||
scsidev->channel, scsidev->id, scsidev->lun,
|
||||
cmd->cmnd);
|
||||
#endif
|
||||
}
|
||||
|
||||
int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev)
|
||||
{
|
||||
u8 scsi_op = scmd->cmnd[0];
|
||||
ata_xlat_func_t xlat_func;
|
||||
int rc = 0;
|
||||
|
||||
if (unlikely(!scmd->cmd_len))
|
||||
goto bad_cdb_len;
|
||||
|
||||
if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
|
||||
if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len))
|
||||
if (unlikely(scmd->cmd_len > dev->cdb_len))
|
||||
goto bad_cdb_len;
|
||||
|
||||
xlat_func = ata_get_xlat_func(dev, scsi_op);
|
||||
} else {
|
||||
if (unlikely(!scmd->cmd_len))
|
||||
} else if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
|
||||
/* relay SCSI command to ATAPI device */
|
||||
int len = COMMAND_SIZE(scsi_op);
|
||||
|
||||
if (unlikely(len > scmd->cmd_len ||
|
||||
len > dev->cdb_len ||
|
||||
scmd->cmd_len > ATAPI_CDB_LEN))
|
||||
goto bad_cdb_len;
|
||||
|
||||
xlat_func = NULL;
|
||||
if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
|
||||
/* relay SCSI command to ATAPI device */
|
||||
int len = COMMAND_SIZE(scsi_op);
|
||||
if (unlikely(len > scmd->cmd_len ||
|
||||
len > dev->cdb_len ||
|
||||
scmd->cmd_len > ATAPI_CDB_LEN))
|
||||
goto bad_cdb_len;
|
||||
xlat_func = atapi_xlat;
|
||||
} else {
|
||||
/* ATA_16 passthru, treat as an ATA command */
|
||||
if (unlikely(scmd->cmd_len > 16))
|
||||
goto bad_cdb_len;
|
||||
|
||||
xlat_func = atapi_xlat;
|
||||
} else {
|
||||
/* ATA_16 passthru, treat as an ATA command */
|
||||
if (unlikely(scmd->cmd_len > 16))
|
||||
goto bad_cdb_len;
|
||||
|
||||
xlat_func = ata_get_xlat_func(dev, scsi_op);
|
||||
}
|
||||
xlat_func = ata_get_xlat_func(dev, scsi_op);
|
||||
}
|
||||
|
||||
if (xlat_func)
|
||||
rc = ata_scsi_translate(dev, scmd, xlat_func);
|
||||
else
|
||||
ata_scsi_simulate(dev, scmd);
|
||||
return ata_scsi_translate(dev, scmd, xlat_func);
|
||||
|
||||
return rc;
|
||||
ata_scsi_simulate(dev, scmd);
|
||||
|
||||
return 0;
|
||||
|
||||
bad_cdb_len:
|
||||
DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n",
|
||||
scmd->cmd_len, scsi_op, dev->cdb_len);
|
||||
scmd->result = DID_ERROR << 16;
|
||||
scmd->scsi_done(scmd);
|
||||
scsi_done(scmd);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -4064,14 +4029,12 @@ int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
|
||||
|
||||
spin_lock_irqsave(ap->lock, irq_flags);
|
||||
|
||||
ata_scsi_dump_cdb(ap, cmd);
|
||||
|
||||
dev = ata_scsi_find_dev(ap, scsidev);
|
||||
if (likely(dev))
|
||||
rc = __ata_scsi_queuecmd(cmd, dev);
|
||||
else {
|
||||
cmd->result = (DID_BAD_TARGET << 16);
|
||||
cmd->scsi_done(cmd);
|
||||
scsi_done(cmd);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(ap->lock, irq_flags);
|
||||
@ -4131,11 +4094,17 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
|
||||
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2);
|
||||
break;
|
||||
case 0xb6:
|
||||
if (dev->flags & ATA_DFLAG_ZAC) {
|
||||
if (dev->flags & ATA_DFLAG_ZAC)
|
||||
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b6);
|
||||
break;
|
||||
}
|
||||
fallthrough;
|
||||
else
|
||||
ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
|
||||
break;
|
||||
case 0xb9:
|
||||
if (dev->cpr_log)
|
||||
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b9);
|
||||
else
|
||||
ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
|
||||
break;
|
||||
default:
|
||||
ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
|
||||
break;
|
||||
@ -4199,7 +4168,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
|
||||
break;
|
||||
}
|
||||
|
||||
cmd->scsi_done(cmd);
|
||||
scsi_done(cmd);
|
||||
}
|
||||
|
||||
int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
|
||||
@ -4492,12 +4461,9 @@ void ata_scsi_hotplug(struct work_struct *work)
|
||||
container_of(work, struct ata_port, hotplug_task.work);
|
||||
int i;
|
||||
|
||||
if (ap->pflags & ATA_PFLAG_UNLOADING) {
|
||||
DPRINTK("ENTER/EXIT - unloading\n");
|
||||
if (ap->pflags & ATA_PFLAG_UNLOADING)
|
||||
return;
|
||||
}
|
||||
|
||||
DPRINTK("ENTER\n");
|
||||
mutex_lock(&ap->scsi_scan_mutex);
|
||||
|
||||
/* Unplug detached devices. We cannot use link iterator here
|
||||
@ -4513,7 +4479,6 @@ void ata_scsi_hotplug(struct work_struct *work)
|
||||
ata_scsi_scan_host(ap, 0);
|
||||
|
||||
mutex_unlock(&ap->scsi_scan_mutex);
|
||||
DPRINTK("EXIT\n");
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -18,7 +18,7 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/libata.h>
|
||||
#include <linux/highmem.h>
|
||||
|
||||
#include <trace/events/libata.h>
|
||||
#include "libata.h"
|
||||
|
||||
static struct workqueue_struct *ata_sff_wq;
|
||||
@ -330,10 +330,6 @@ EXPORT_SYMBOL_GPL(ata_sff_dev_select);
|
||||
static void ata_dev_select(struct ata_port *ap, unsigned int device,
|
||||
unsigned int wait, unsigned int can_sleep)
|
||||
{
|
||||
if (ata_msg_probe(ap))
|
||||
ata_port_info(ap, "ata_dev_select: ENTER, device %u, wait %u\n",
|
||||
device, wait);
|
||||
|
||||
if (wait)
|
||||
ata_wait_idle(ap);
|
||||
|
||||
@ -409,12 +405,6 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
|
||||
iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
|
||||
iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
|
||||
iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
|
||||
VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
|
||||
tf->hob_feature,
|
||||
tf->hob_nsect,
|
||||
tf->hob_lbal,
|
||||
tf->hob_lbam,
|
||||
tf->hob_lbah);
|
||||
}
|
||||
|
||||
if (is_addr) {
|
||||
@ -423,18 +413,10 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
|
||||
iowrite8(tf->lbal, ioaddr->lbal_addr);
|
||||
iowrite8(tf->lbam, ioaddr->lbam_addr);
|
||||
iowrite8(tf->lbah, ioaddr->lbah_addr);
|
||||
VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
|
||||
tf->feature,
|
||||
tf->nsect,
|
||||
tf->lbal,
|
||||
tf->lbam,
|
||||
tf->lbah);
|
||||
}
|
||||
|
||||
if (tf->flags & ATA_TFLAG_DEVICE) {
|
||||
if (tf->flags & ATA_TFLAG_DEVICE)
|
||||
iowrite8(tf->device, ioaddr->device_addr);
|
||||
VPRINTK("device 0x%X\n", tf->device);
|
||||
}
|
||||
|
||||
ata_wait_idle(ap);
|
||||
}
|
||||
@ -494,8 +476,6 @@ EXPORT_SYMBOL_GPL(ata_sff_tf_read);
|
||||
*/
|
||||
void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
|
||||
{
|
||||
DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
|
||||
|
||||
iowrite8(tf->command, ap->ioaddr.command_addr);
|
||||
ata_sff_pause(ap);
|
||||
}
|
||||
@ -505,6 +485,7 @@ EXPORT_SYMBOL_GPL(ata_sff_exec_command);
|
||||
* ata_tf_to_host - issue ATA taskfile to host controller
|
||||
* @ap: port to which command is being issued
|
||||
* @tf: ATA taskfile register set
|
||||
* @tag: tag of the associated command
|
||||
*
|
||||
* Issues ATA taskfile register set to ATA host controller,
|
||||
* with proper synchronization with interrupt handler and
|
||||
@ -514,9 +495,12 @@ EXPORT_SYMBOL_GPL(ata_sff_exec_command);
 * spin_lock_irqsave(host lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
				  const struct ata_taskfile *tf,
				  unsigned int tag)
{
	trace_ata_tf_load(ap, tf);
	ap->ops->sff_tf_load(ap, tf);
	trace_ata_exec_command(ap, tf, tag);
	ap->ops->sff_exec_command(ap, tf);
}

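trace_ata_tf_load() and trace_ata_exec_command() above replace the old DPRINTK/VPRINTK output with tracepoints that can be switched on at runtime. A heavily trimmed sketch of what defining such a tracepoint looks like for a hypothetical event; the TRACE_SYSTEM definition, include guards and CREATE_TRACE_POINTS boilerplate that a real trace header needs are omitted, and the actual libata events live in include/trace/events/libata.h:

#include <linux/tracepoint.h>

TRACE_EVENT(mydrv_cmd_issue,
	TP_PROTO(unsigned int port, unsigned char cmd),
	TP_ARGS(port, cmd),

	TP_STRUCT__entry(
		__field(unsigned int, port)
		__field(unsigned char, cmd)
	),

	TP_fast_assign(
		__entry->port = port;
		__entry->cmd = cmd;
	),

	TP_printk("port=%u cmd=0x%02x", __entry->port, __entry->cmd)
);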
@ -680,7 +664,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
|
||||
page = nth_page(page, (offset >> PAGE_SHIFT));
|
||||
offset %= PAGE_SIZE;
|
||||
|
||||
DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
|
||||
trace_ata_sff_pio_transfer_data(qc, offset, qc->sect_size);
|
||||
|
||||
/*
|
||||
* Split the transfer when it splits a page boundary. Note that the
|
||||
@ -750,7 +734,7 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc)
|
||||
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
|
||||
{
|
||||
/* send SCSI cdb */
|
||||
DPRINTK("send cdb\n");
|
||||
trace_atapi_send_cdb(qc, 0, qc->dev->cdb_len);
|
||||
WARN_ON_ONCE(qc->dev->cdb_len < 12);
|
||||
|
||||
ap->ops->sff_data_xfer(qc, qc->cdb, qc->dev->cdb_len, 1);
|
||||
@ -768,6 +752,7 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
|
||||
case ATAPI_PROT_DMA:
|
||||
ap->hsm_task_state = HSM_ST_LAST;
|
||||
/* initiate bmdma */
|
||||
trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
|
||||
ap->ops->bmdma_start(qc);
|
||||
break;
|
||||
#endif /* CONFIG_ATA_BMDMA */
|
||||
@ -820,7 +805,7 @@ static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
|
||||
/* don't cross page boundaries */
|
||||
count = min(count, (unsigned int)PAGE_SIZE - offset);
|
||||
|
||||
DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
|
||||
trace_atapi_pio_transfer_data(qc, offset, count);
|
||||
|
||||
/* do the actual data transfer */
|
||||
buf = kmap_atomic(page);
|
||||
@ -888,8 +873,6 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
|
||||
if (unlikely(!bytes))
|
||||
goto atapi_check;
|
||||
|
||||
VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
|
||||
|
||||
if (unlikely(__atapi_pio_bytes(qc, bytes)))
|
||||
goto err_out;
|
||||
ata_sff_sync(ap); /* flush */
|
||||
@ -1002,8 +985,7 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
|
||||
WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));
|
||||
|
||||
fsm_start:
|
||||
DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
|
||||
ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
|
||||
trace_ata_sff_hsm_state(qc, status);
|
||||
|
||||
switch (ap->hsm_task_state) {
|
||||
case HSM_ST_FIRST:
|
||||
@ -1204,8 +1186,7 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
|
||||
}
|
||||
|
||||
/* no more data to transfer */
|
||||
DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
|
||||
ap->print_id, qc->dev->devno, status);
|
||||
trace_ata_sff_hsm_command_complete(qc, status);
|
||||
|
||||
WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
|
||||
|
||||
@ -1262,7 +1243,7 @@ EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);
|
||||
|
||||
void ata_sff_flush_pio_task(struct ata_port *ap)
|
||||
{
|
||||
DPRINTK("ENTER\n");
|
||||
trace_ata_sff_flush_pio_task(ap);
|
||||
|
||||
cancel_delayed_work_sync(&ap->sff_pio_task);
|
||||
|
||||
@ -1279,9 +1260,6 @@ void ata_sff_flush_pio_task(struct ata_port *ap)
|
||||
spin_unlock_irq(ap->lock);
|
||||
|
||||
ap->sff_pio_task_link = NULL;
|
||||
|
||||
if (ata_msg_ctl(ap))
|
||||
ata_port_dbg(ap, "%s: EXIT\n", __func__);
|
||||
}
|
||||
|
||||
static void ata_sff_pio_task(struct work_struct *work)
|
||||
@ -1376,7 +1354,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
|
||||
if (qc->tf.flags & ATA_TFLAG_POLLING)
|
||||
ata_qc_set_polling(qc);
|
||||
|
||||
ata_tf_to_host(ap, &qc->tf);
|
||||
ata_tf_to_host(ap, &qc->tf, qc->tag);
|
||||
ap->hsm_task_state = HSM_ST_LAST;
|
||||
|
||||
if (qc->tf.flags & ATA_TFLAG_POLLING)
|
||||
@ -1388,7 +1366,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
|
||||
if (qc->tf.flags & ATA_TFLAG_POLLING)
|
||||
ata_qc_set_polling(qc);
|
||||
|
||||
ata_tf_to_host(ap, &qc->tf);
|
||||
ata_tf_to_host(ap, &qc->tf, qc->tag);
|
||||
|
||||
if (qc->tf.flags & ATA_TFLAG_WRITE) {
|
||||
/* PIO data out protocol */
|
||||
@ -1418,7 +1396,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
|
||||
if (qc->tf.flags & ATA_TFLAG_POLLING)
|
||||
ata_qc_set_polling(qc);
|
||||
|
||||
ata_tf_to_host(ap, &qc->tf);
|
||||
ata_tf_to_host(ap, &qc->tf, qc->tag);
|
||||
|
||||
ap->hsm_task_state = HSM_ST_FIRST;
|
||||
|
||||
@ -1478,8 +1456,7 @@ static unsigned int __ata_sff_port_intr(struct ata_port *ap,
|
||||
{
|
||||
u8 status;
|
||||
|
||||
VPRINTK("ata%u: protocol %d task_state %d\n",
|
||||
ap->print_id, qc->tf.protocol, ap->hsm_task_state);
|
||||
trace_ata_sff_port_intr(qc, hsmv_on_idle);
|
||||
|
||||
/* Check whether we are expecting interrupt in this state */
|
||||
switch (ap->hsm_task_state) {
|
||||
@ -1853,7 +1830,7 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
|
||||
return ATA_DEV_NONE;
|
||||
|
||||
/* determine if device is ATA or ATAPI */
|
||||
class = ata_dev_classify(&tf);
|
||||
class = ata_port_classify(ap, &tf);
|
||||
|
||||
if (class == ATA_DEV_UNKNOWN) {
|
||||
/* If the device failed diagnostic, it's likely to
|
||||
@ -1956,8 +1933,6 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
{
struct ata_ioports *ioaddr = &ap->ioaddr;

DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

if (ap->ioaddr.ctl_addr) {
/* software reset. causes dev0 to be selected */
iowrite8(ap->ctl, ioaddr->ctl_addr);

@ -1995,8 +1970,6 @@ int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
int rc;
u8 err;

DPRINTK("ENTER\n");

/* determine if device 0/1 are present */
if (ata_devchk(ap, 0))
devmask |= (1 << 0);

@ -2007,7 +1980,6 @@ int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
ap->ops->sff_dev_select(ap, 0);

/* issue bus reset */
DPRINTK("about to softreset, devmask=%x\n", devmask);
rc = ata_bus_softreset(ap, devmask, deadline);
/* if link is occupied, -ENODEV too is an error */
if (rc && (rc != -ENODEV || sata_scr_valid(link))) {

@ -2022,7 +1994,6 @@ int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
classes[1] = ata_sff_dev_classify(&link->device[1],
devmask & (1 << 1), &err);

DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_softreset);

@ -2055,7 +2026,6 @@ int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
if (online)
*class = ata_sff_dev_classify(link->device, 1, NULL);

DPRINTK("EXIT, class=%u\n", *class);
return rc;
}
EXPORT_SYMBOL_GPL(sata_sff_hardreset);

@ -2085,10 +2055,8 @@ void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
ap->ops->sff_dev_select(ap, 0);

/* bail out if no device is present */
if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
DPRINTK("EXIT, no device\n");
if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE)
return;
}

/* set up device control */
if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {

@ -2123,7 +2091,6 @@ void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
&& count < 65536; count += 2)
ioread16(ap->ioaddr.data_addr);

/* Can become DEBUG later */
if (count)
ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);

@ -2467,8 +2434,6 @@ static int ata_pci_init_one(struct pci_dev *pdev,
struct ata_host *host = NULL;
int rc;

DPRINTK("ENTER\n");

pi = ata_sff_find_valid_pi(ppi);
if (!pi) {
dev_err(&pdev->dev, "no valid port_info specified\n");

@ -2614,7 +2579,6 @@ static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)

prd[pi].addr = cpu_to_le32(addr);
prd[pi].flags_len = cpu_to_le32(len & 0xffff);
VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);

pi++;
sg_len -= len;

@ -2674,7 +2638,6 @@ static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
prd[++pi].addr = cpu_to_le32(addr + 0x8000);
}
prd[pi].flags_len = cpu_to_le32(blen);
VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);

pi++;
sg_len -= len;

@ -2756,8 +2719,11 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
case ATA_PROT_DMA:
WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);

trace_ata_tf_load(ap, &qc->tf);
ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
ap->ops->bmdma_setup(qc); /* set up bmdma */
trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
ap->ops->bmdma_start(qc); /* initiate bmdma */
ap->hsm_task_state = HSM_ST_LAST;
break;

@ -2765,7 +2731,9 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
case ATAPI_PROT_DMA:
WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);

trace_ata_tf_load(ap, &qc->tf);
ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
ap->ops->bmdma_setup(qc); /* set up bmdma */
ap->hsm_task_state = HSM_ST_FIRST;

@ -2806,13 +2774,14 @@ unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
/* check status of DMA engine */
host_stat = ap->ops->bmdma_status(ap);
VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat);
trace_ata_bmdma_status(ap, host_stat);

/* if it's not our irq... */
if (!(host_stat & ATA_DMA_INTR))
return ata_sff_idle_irq(ap);

/* before we do anything else, clear DMA-Start bit */
trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
ap->ops->bmdma_stop(qc);
bmdma_stopped = true;

@ -2881,6 +2850,7 @@ void ata_bmdma_error_handler(struct ata_port *ap)
u8 host_stat;

host_stat = ap->ops->bmdma_status(ap);
trace_ata_bmdma_status(ap, host_stat);

/* BMDMA controllers indicate host bus error by
* setting DMA_ERR bit and timing out. As it wasn't

@ -2892,6 +2862,7 @@ void ata_bmdma_error_handler(struct ata_port *ap)
thaw = true;
}

trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
ap->ops->bmdma_stop(qc);

/* if we're gonna thaw, make sure IRQ is clear */

@ -2925,6 +2896,7 @@ void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)

if (ata_is_dma(qc->tf.protocol)) {
spin_lock_irqsave(ap->lock, flags);
trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
ap->ops->bmdma_stop(qc);
spin_unlock_irqrestore(ap->lock, flags);
}

@ -38,6 +38,24 @@ libata_trace_parse_status(struct trace_seq *p, unsigned char status)
return ret;
}

const char *
libata_trace_parse_host_stat(struct trace_seq *p, unsigned char host_stat)
{
const char *ret = trace_seq_buffer_ptr(p);

trace_seq_printf(p, "{ ");
if (host_stat & ATA_DMA_INTR)
trace_seq_printf(p, "INTR ");
if (host_stat & ATA_DMA_ERR)
trace_seq_printf(p, "ERR ");
if (host_stat & ATA_DMA_ACTIVE)
trace_seq_printf(p, "ACTIVE ");
trace_seq_putc(p, '}');
trace_seq_putc(p, 0);

return ret;
}

const char *
libata_trace_parse_eh_action(struct trace_seq *p, unsigned int eh_action)
{

@ -137,6 +155,35 @@ libata_trace_parse_qc_flags(struct trace_seq *p, unsigned int qc_flags)
return ret;
}

const char *
libata_trace_parse_tf_flags(struct trace_seq *p, unsigned int tf_flags)
{
const char *ret = trace_seq_buffer_ptr(p);

trace_seq_printf(p, "%x", tf_flags);
if (tf_flags) {
trace_seq_printf(p, "{ ");
if (tf_flags & ATA_TFLAG_LBA48)
trace_seq_printf(p, "LBA48 ");
if (tf_flags & ATA_TFLAG_ISADDR)
trace_seq_printf(p, "ISADDR ");
if (tf_flags & ATA_TFLAG_DEVICE)
trace_seq_printf(p, "DEV ");
if (tf_flags & ATA_TFLAG_WRITE)
trace_seq_printf(p, "WRITE ");
if (tf_flags & ATA_TFLAG_LBA)
trace_seq_printf(p, "LBA ");
if (tf_flags & ATA_TFLAG_FUA)
trace_seq_printf(p, "FUA ");
if (tf_flags & ATA_TFLAG_POLLING)
trace_seq_printf(p, "POLL ");
trace_seq_putc(p, '}');
}
trace_seq_putc(p, 0);

return ret;
}

const char *
libata_trace_parse_subcmd(struct trace_seq *p, unsigned char cmd,
unsigned char feature, unsigned char hob_nsect)
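The new parse helpers follow the same pattern as the existing libata_trace_parse_status(): they render a bitmask into the trace_seq buffer and return a pointer to the formatted text so a tracepoint's TP_printk() can embed it. A rough sketch of how such a helper is typically consumed from a trace event definition follows; the wrapper macro name and the event itself are illustrative assumptions, not part of this diff:

/* Illustrative sketch only: the event name, its fields and the
 * __parse_host_stat wrapper are assumptions; the real definitions
 * live in include/trace/events/libata.h.
 */
#define __parse_host_stat(s) libata_trace_parse_host_stat(p, s)

TRACE_EVENT(example_bmdma_status,
	TP_PROTO(struct ata_port *ap, unsigned char host_stat),
	TP_ARGS(ap, host_stat),
	TP_STRUCT__entry(
		__field(unsigned int,  ata_port)
		__field(unsigned char, host_stat)
	),
	TP_fast_assign(
		__entry->ata_port  = ap->print_id;
		__entry->host_stat = host_stat;
	),
	TP_printk("ata_port=%u host_stat=%s",
		  __entry->ata_port,
		  __parse_host_stat(__entry->host_stat))
);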
@ -163,7 +163,7 @@ static struct {
{ AC_ERR_INVALID, "InvalidArg" },
{ AC_ERR_OTHER, "Unknown" },
{ AC_ERR_NODEV_HINT, "NoDeviceHint" },
{ AC_ERR_NCQ, "NCQError" }
{ AC_ERR_NCQ, "NCQError" }
};
ata_bitfield_name_match(err, ata_err_names)

@ -321,13 +321,43 @@ int ata_tport_add(struct device *parent,
return error;
}

/**
* ata_port_classify - determine device type based on ATA-spec signature
* @ap: ATA port device on which the classification should be run
* @tf: ATA taskfile register set for device to be identified
*
* A wrapper around ata_dev_classify() to provide additional logging
*
* RETURNS:
* Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
* %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
*/
unsigned int ata_port_classify(struct ata_port *ap,
const struct ata_taskfile *tf)
{
int i;
unsigned int class = ata_dev_classify(tf);

/* Start with index '1' to skip the 'unknown' entry */
for (i = 1; i < ARRAY_SIZE(ata_class_names); i++) {
if (ata_class_names[i].value == class) {
ata_port_dbg(ap, "found %s device by sig\n",
ata_class_names[i].name);
return class;
}
}

ata_port_info(ap, "found unknown device (class %u)\n", class);
return class;
}
EXPORT_SYMBOL_GPL(ata_port_classify);

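As the earlier libata-sff.c hunk shows, callers that used to pass a bare taskfile to ata_dev_classify() can switch to ata_port_classify() whenever an ata_port is at hand, so the detected class gets logged against the right port. A minimal sketch of such a call site follows; the surrounding function is hypothetical, only the ata_port_classify() call itself reflects this diff:

/* Hypothetical caller: classify whatever answered on @ap from the
 * signature left in @tf, letting ata_port_classify() log the result.
 */
static unsigned int example_classify_device(struct ata_port *ap,
					    const struct ata_taskfile *tf)
{
	unsigned int class = ata_port_classify(ap, tf);

	/* Treat an unrecognised signature as "no device present". */
	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;

	return class;
}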
/*
* ATA link attributes
*/
static int noop(int x) { return x; }

#define ata_link_show_linkspeed(field, format) \
#define ata_link_show_linkspeed(field, format) \
static ssize_t \
show_ata_link_##field(struct device *dev, \
struct device_attribute *attr, char *buf) \

@ -416,7 +446,7 @@ int ata_tlink_add(struct ata_link *link)
dev->release = ata_tlink_release;
if (ata_is_host_link(link))
dev_set_name(dev, "link%d", ap->print_id);
else
else
dev_set_name(dev, "link%d.%d", ap->print_id, link->pmp);

transport_setup_device(dev);

@ -472,7 +502,7 @@ ata_dev_attr(xfer, dma_mode);
ata_dev_attr(xfer, xfer_mode);


#define ata_dev_show_simple(field, format_string, cast) \
#define ata_dev_show_simple(field, format_string, cast) \
static ssize_t \
show_ata_dev_##field(struct device *dev, \
struct device_attribute *attr, char *buf) \

@ -482,9 +512,9 @@ show_ata_dev_##field(struct device *dev, \
return scnprintf(buf, 20, format_string, cast ata_dev->field); \
}

#define ata_dev_simple_attr(field, format_string, type) \
#define ata_dev_simple_attr(field, format_string, type) \
ata_dev_show_simple(field, format_string, (type)) \
static DEVICE_ATTR(field, S_IRUGO, \
static DEVICE_ATTR(field, S_IRUGO, \
show_ata_dev_##field, NULL)

ata_dev_simple_attr(spdn_cnt, "%d\n", int);

@ -502,7 +532,7 @@ static int ata_show_ering(struct ata_ering_entry *ent, void *void_arg)

seconds = div_u64_rem(ent->timestamp, HZ, &rem);
arg->written += sprintf(arg->buf + arg->written,
"[%5llu.%09lu]", seconds,
"[%5llu.%09lu]", seconds,
rem * NSEC_PER_SEC / HZ);
arg->written += get_ata_err_names(ent->err_mask,
arg->buf + arg->written);

@ -667,7 +697,7 @@ static int ata_tdev_add(struct ata_device *ata_dev)
dev->release = ata_tdev_release;
if (ata_is_host_link(link))
dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno);
else
else
dev_set_name(dev, "dev%d.%d.0", ap->print_id, link->pmp);

transport_setup_device(dev);

@ -689,7 +719,7 @@ static int ata_tdev_add(struct ata_device *ata_dev)
*/

#define SETUP_TEMPLATE(attrb, field, perm, test) \
i->private_##attrb[count] = dev_attr_##field; \
i->private_##attrb[count] = dev_attr_##field; \
i->private_##attrb[count].attr.mode = perm; \
i->attrb[count] = &i->private_##attrb[count]; \
if (test) \

@ -148,7 +148,6 @@ extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
unsigned int id, u64 lun);
void ata_scsi_sdev_config(struct scsi_device *sdev);
int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev);
void ata_scsi_dump_cdb(struct ata_port *ap, struct scsi_cmnd *cmd);
int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev);

/* libata-eh.c */

@ -166,7 +165,7 @@ extern void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
extern void ata_eh_done(struct ata_link *link, struct ata_device *dev,
unsigned int action);
extern void ata_eh_autopsy(struct ata_port *ap);
const char *ata_get_cmd_descript(u8 command);
const char *ata_get_cmd_name(u8 command);
extern void ata_eh_report(struct ata_port *ap);
extern int ata_eh_reset(struct ata_link *link, int classify,
ata_prereset_fn_t prereset, ata_reset_fn_t softreset,

@ -179,7 +178,7 @@ extern int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
extern void ata_eh_finish(struct ata_port *ap);
extern int ata_ering_map(struct ata_ering *ering,
int (*map_fn)(struct ata_ering_entry *, void *),
void *arg);
void *arg);
extern unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key);
extern unsigned int atapi_eh_request_sense(struct ata_device *dev,
u8 *sense_buf, u8 dfl_sense_key);

@ -37,7 +37,7 @@
#define DRV_NAME "pata_ali"
#define DRV_VERSION "0.7.8"

static int ali_atapi_dma = 0;
static int ali_atapi_dma;
module_param_named(atapi_dma, ali_atapi_dma, int, 0644);
MODULE_PARM_DESC(atapi_dma, "Enable ATAPI DMA (0=disable, 1=enable)");

@ -123,7 +123,7 @@ static unsigned long ali_20_filter(struct ata_device *adev, unsigned long mask)
mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
if (strstr(model_num, "WDC"))
return mask &= ~ATA_MASK_UDMA;
mask &= ~ATA_MASK_UDMA;
return mask;
}

@ -215,7 +215,7 @@ static void ali_set_piomode(struct ata_port *ap, struct ata_device *adev)
struct ata_timing p;
ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
if (pair->dma_mode) {
if (ata_dma_enabled(pair)) {
ata_timing_compute(pair, pair->dma_mode, &p, T, 1);
ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
}

@ -264,7 +264,7 @@ static void ali_set_dmamode(struct ata_port *ap, struct ata_device *adev)
struct ata_timing p;
ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
if (pair->dma_mode) {
if (ata_dma_enabled(pair)) {
ata_timing_compute(pair, pair->dma_mode, &p, T, 1);
ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
}

@ -66,7 +66,7 @@ static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offse

if (peer) {
/* This may be over conservative */
if (peer->dma_mode) {
if (ata_dma_enabled(peer)) {
ata_timing_compute(peer, peer->dma_mode, &apeer, T, UT);
ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
}

@ -39,6 +39,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <trace/events/libata.h>

#define DRIVER_NAME "arasan_cf"
#define TIMEOUT msecs_to_jiffies(3000)

@ -703,9 +704,11 @@ static unsigned int arasan_cf_qc_issue(struct ata_queued_cmd *qc)
case ATA_PROT_DMA:
WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);

trace_ata_tf_load(ap, &qc->tf);
ap->ops->sff_tf_load(ap, &qc->tf);
acdev->dma_status = 0;
acdev->qc = qc;
trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
arasan_cf_dma_start(acdev);
ap->hsm_task_state = HSM_ST_LAST;
break;

@ -155,7 +155,7 @@ static int atp867x_get_active_clocks_shifted(struct ata_port *ap,
case 1 ... 6:
break;
default:
printk(KERN_WARNING "ATP867X: active %dclk is invalid. "
ata_port_warn(ap, "ATP867X: active %dclk is invalid. "
"Using 12clk.\n", clk);
fallthrough;
case 9 ... 12:

@ -171,7 +171,8 @@ static int atp867x_get_active_clocks_shifted(struct ata_port *ap,
return clocks << ATP867X_IO_PIOSPD_ACTIVE_SHIFT;
}

static int atp867x_get_recover_clocks_shifted(unsigned int clk)
static int atp867x_get_recover_clocks_shifted(struct ata_port *ap,
unsigned int clk)
{
unsigned char clocks = clk;

@ -188,7 +189,7 @@ static int atp867x_get_recover_clocks_shifted(unsigned int clk)
case 15:
break;
default:
printk(KERN_WARNING "ATP867X: recover %dclk is invalid. "
ata_port_warn(ap, "ATP867X: recover %dclk is invalid. "
"Using default 12clk.\n", clk);
fallthrough;
case 12: /* default 12 clk */

@ -225,7 +226,7 @@ static void atp867x_set_piomode(struct ata_port *ap, struct ata_device *adev)
iowrite8(b, dp->dma_mode);

b = atp867x_get_active_clocks_shifted(ap, t.active) |
atp867x_get_recover_clocks_shifted(t.recover);
atp867x_get_recover_clocks_shifted(ap, t.recover);

if (adev->devno & 1)
iowrite8(b, dp->slave_piospd);

@ -233,7 +234,7 @@ static void atp867x_set_piomode(struct ata_port *ap, struct ata_device *adev)
iowrite8(b, dp->mstr_piospd);

b = atp867x_get_active_clocks_shifted(ap, t.act8b) |
atp867x_get_recover_clocks_shifted(t.rec8b);
atp867x_get_recover_clocks_shifted(ap, t.rec8b);

iowrite8(b, dp->eightb_piospd);
}

@ -270,7 +271,6 @@ static struct ata_port_operations atp867x_ops = {
};


#ifdef ATP867X_DEBUG
static void atp867x_check_res(struct pci_dev *pdev)
{
int i;

@ -280,7 +280,7 @@ static void atp867x_check_res(struct pci_dev *pdev)
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
start = pci_resource_start(pdev, i);
len = pci_resource_len(pdev, i);
printk(KERN_DEBUG "ATP867X: resource start:len=%lx:%lx\n",
dev_dbg(&pdev->dev, "ATP867X: resource start:len=%lx:%lx\n",
start, len);
}
}

@ -290,49 +290,48 @@ static void atp867x_check_ports(struct ata_port *ap, int port)
struct ata_ioports *ioaddr = &ap->ioaddr;
struct atp867x_priv *dp = ap->private_data;

printk(KERN_DEBUG "ATP867X: port[%d] addresses\n"
" cmd_addr =0x%llx, 0x%llx\n"
" ctl_addr =0x%llx, 0x%llx\n"
" bmdma_addr =0x%llx, 0x%llx\n"
" data_addr =0x%llx\n"
" error_addr =0x%llx\n"
" feature_addr =0x%llx\n"
" nsect_addr =0x%llx\n"
" lbal_addr =0x%llx\n"
" lbam_addr =0x%llx\n"
" lbah_addr =0x%llx\n"
" device_addr =0x%llx\n"
" status_addr =0x%llx\n"
" command_addr =0x%llx\n"
" dp->dma_mode =0x%llx\n"
" dp->mstr_piospd =0x%llx\n"
" dp->slave_piospd =0x%llx\n"
" dp->eightb_piospd =0x%llx\n"
ata_port_dbg(ap, "ATP867X: port[%d] addresses\n"
" cmd_addr =0x%lx, 0x%lx\n"
" ctl_addr =0x%lx, 0x%lx\n"
" bmdma_addr =0x%lx, 0x%lx\n"
" data_addr =0x%lx\n"
" error_addr =0x%lx\n"
" feature_addr =0x%lx\n"
" nsect_addr =0x%lx\n"
" lbal_addr =0x%lx\n"
" lbam_addr =0x%lx\n"
" lbah_addr =0x%lx\n"
" device_addr =0x%lx\n"
" status_addr =0x%lx\n"
" command_addr =0x%lx\n"
" dp->dma_mode =0x%lx\n"
" dp->mstr_piospd =0x%lx\n"
" dp->slave_piospd =0x%lx\n"
" dp->eightb_piospd =0x%lx\n"
" dp->pci66mhz =0x%lx\n",
port,
(unsigned long long)ioaddr->cmd_addr,
(unsigned long long)ATP867X_IO_PORTBASE(ap, port),
(unsigned long long)ioaddr->ctl_addr,
(unsigned long long)ATP867X_IO_ALTSTATUS(ap, port),
(unsigned long long)ioaddr->bmdma_addr,
(unsigned long long)ATP867X_IO_DMABASE(ap, port),
(unsigned long long)ioaddr->data_addr,
(unsigned long long)ioaddr->error_addr,
(unsigned long long)ioaddr->feature_addr,
(unsigned long long)ioaddr->nsect_addr,
(unsigned long long)ioaddr->lbal_addr,
(unsigned long long)ioaddr->lbam_addr,
(unsigned long long)ioaddr->lbah_addr,
(unsigned long long)ioaddr->device_addr,
(unsigned long long)ioaddr->status_addr,
(unsigned long long)ioaddr->command_addr,
(unsigned long long)dp->dma_mode,
(unsigned long long)dp->mstr_piospd,
(unsigned long long)dp->slave_piospd,
(unsigned long long)dp->eightb_piospd,
(unsigned long)ioaddr->cmd_addr,
(unsigned long)ATP867X_IO_PORTBASE(ap, port),
(unsigned long)ioaddr->ctl_addr,
(unsigned long)ATP867X_IO_ALTSTATUS(ap, port),
(unsigned long)ioaddr->bmdma_addr,
(unsigned long)ATP867X_IO_DMABASE(ap, port),
(unsigned long)ioaddr->data_addr,
(unsigned long)ioaddr->error_addr,
(unsigned long)ioaddr->feature_addr,
(unsigned long)ioaddr->nsect_addr,
(unsigned long)ioaddr->lbal_addr,
(unsigned long)ioaddr->lbam_addr,
(unsigned long)ioaddr->lbah_addr,
(unsigned long)ioaddr->device_addr,
(unsigned long)ioaddr->status_addr,
(unsigned long)ioaddr->command_addr,
(unsigned long)dp->dma_mode,
(unsigned long)dp->mstr_piospd,
(unsigned long)dp->slave_piospd,
(unsigned long)dp->eightb_piospd,
(unsigned long)dp->pci66mhz);
}
#endif

static int atp867x_set_priv(struct ata_port *ap)
{

@ -370,8 +369,7 @@ static void atp867x_fixup(struct ata_host *host)
if (v < 0x80) {
v = 0x80;
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, v);
printk(KERN_DEBUG "ATP867X: set latency timer of device %s"
" to %d\n", pci_name(pdev), v);
dev_dbg(&pdev->dev, "ATP867X: set latency timer to %d\n", v);
}

/*

@ -419,13 +417,11 @@ static int atp867x_ata_pci_sff_init_host(struct ata_host *host)
return rc;
host->iomap = pcim_iomap_table(pdev);

#ifdef ATP867X_DEBUG
atp867x_check_res(pdev);

for (i = 0; i < PCI_STD_NUM_BARS; i++)
printk(KERN_DEBUG "ATP867X: iomap[%d]=0x%llx\n", i,
(unsigned long long)(host->iomap[i]));
#endif
dev_dbg(gdev, "ATP867X: iomap[%d]=0x%p\n", i,
host->iomap[i]);

/*
* request, iomap BARs and init port addresses accordingly

@ -444,9 +440,8 @@ static int atp867x_ata_pci_sff_init_host(struct ata_host *host)
if (rc)
return rc;

#ifdef ATP867X_DEBUG
atp867x_check_ports(ap, i);
#endif

ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
(unsigned long)ioaddr->cmd_addr,
(unsigned long)ioaddr->ctl_addr);

@ -486,7 +481,7 @@ static int atp867x_init_one(struct pci_dev *pdev,
if (rc)
return rc;

printk(KERN_INFO "ATP867X: ATP867 ATA UDMA133 controller (rev %02X)",
dev_info(&pdev->dev, "ATP867X: ATP867 ATA UDMA133 controller (rev %02X)",
pdev->device);

host = ata_host_alloc_pinfo(&pdev->dev, ppi, ATP867X_NUM_PORTS);

Some files were not shown because too many files have changed in this diff.