Mirror of https://github.com/Qortal/Brooklyn.git, synced 2025-02-11 17:55:54 +00:00

Commit deda272df8 (parent df8b07f6ea): "final commit before NXT release"
@@ -108,7 +108,7 @@ This bump in ABI version is at most once per kernel development cycle.

 For example, if current state of ``libbpf.map`` is:

-.. code-block:: c
+.. code-block:: none

         LIBBPF_0.0.1 {
                 global:
@@ -121,7 +121,7 @@ For example, if current state of ``libbpf.map`` is:
 , and a new symbol ``bpf_func_c`` is being introduced, then
 ``libbpf.map`` should be changed like this:

-.. code-block:: c
+.. code-block:: none

         LIBBPF_0.0.1 {
                 global:
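Note: the hunk is cut off before the resulting map. The pattern the documentation describes is a linker version script in which a new version node lists only the new symbol and inherits the rest from the previous node; a sketch based on the surrounding text (only ``bpf_func_c`` is new):

        LIBBPF_0.0.1 {
                global:
                        bpf_func_a;
                        bpf_func_b;
                local:
                        *;
        };

        LIBBPF_0.0.2 {
                global:
                        bpf_func_c;
        } LIBBPF_0.0.1;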
@@ -152,47 +152,6 @@ allOf:
              maxItems: 1
            st,drdy-int-pin: false

-  - if:
-      properties:
-        compatible:
-          enum:
-            # Two intertial interrupts i.e. accelerometer/gyro interrupts
-            - st,h3lis331dl-accel
-            - st,l3g4200d-gyro
-            - st,l3g4is-gyro
-            - st,l3gd20-gyro
-            - st,l3gd20h-gyro
-            - st,lis2de12
-            - st,lis2dw12
-            - st,lis2hh12
-            - st,lis2dh12-accel
-            - st,lis331dl-accel
-            - st,lis331dlh-accel
-            - st,lis3de
-            - st,lis3dh-accel
-            - st,lis3dhh
-            - st,lis3mdl-magn
-            - st,lng2dm-accel
-            - st,lps331ap-press
-            - st,lsm303agr-accel
-            - st,lsm303dlh-accel
-            - st,lsm303dlhc-accel
-            - st,lsm303dlm-accel
-            - st,lsm330-accel
-            - st,lsm330-gyro
-            - st,lsm330d-accel
-            - st,lsm330d-gyro
-            - st,lsm330dl-accel
-            - st,lsm330dl-gyro
-            - st,lsm330dlc-accel
-            - st,lsm330dlc-gyro
-            - st,lsm9ds0-gyro
-            - st,lsm9ds1-magn
-    then:
-      properties:
-        interrupts:
-          maxItems: 2
-
 required:
   - compatible
   - reg
@@ -18,114 +18,5 @@ real, with all the uAPI bits is:
 * Route shmem backend over to TTM SYSTEM for discrete
 * TTM purgeable object support
 * Move i915 buddy allocator over to TTM
-* MMAP ioctl mode(see `I915 MMAP`_)
-* SET/GET ioctl caching(see `I915 SET/GET CACHING`_)
 * Send RFC(with mesa-dev on cc) for final sign off on the uAPI
 * Add pciid for DG1 and turn on uAPI for real
-
-New object placement and region query uAPI
-==========================================
-Starting from DG1 we need to give userspace the ability to allocate buffers from
-device local-memory. Currently the driver supports gem_create, which can place
-buffers in system memory via shmem, and the usual assortment of other
-interfaces, like dumb buffers and userptr.
-
-To support this new capability, while also providing a uAPI which will work
-beyond just DG1, we propose to offer three new bits of uAPI:
-
-DRM_I915_QUERY_MEMORY_REGIONS
------------------------------
-New query ID which allows userspace to discover the list of supported memory
-regions(like system-memory and local-memory) for a given device. We identify
-each region with a class and instance pair, which should be unique. The class
-here would be DEVICE or SYSTEM, and the instance would be zero, on platforms
-like DG1.
-
-Side note: The class/instance design is borrowed from our existing engine uAPI,
-where we describe every physical engine in terms of its class, and the
-particular instance, since we can have more than one per class.
-
-In the future we also want to expose more information which can further
-describe the capabilities of a region.
-
-.. kernel-doc:: include/uapi/drm/i915_drm.h
-        :functions: drm_i915_gem_memory_class drm_i915_gem_memory_class_instance drm_i915_memory_region_info drm_i915_query_memory_regions
-
-GEM_CREATE_EXT
---------------
-New ioctl which is basically just gem_create but now allows userspace to provide
-a chain of possible extensions. Note that if we don't provide any extensions and
-set flags=0 then we get the exact same behaviour as gem_create.
-
-Side note: We also need to support PXP[1] in the near future, which is also
-applicable to integrated platforms, and adds its own gem_create_ext extension,
-which basically lets userspace mark a buffer as "protected".
-
-.. kernel-doc:: include/uapi/drm/i915_drm.h
-        :functions: drm_i915_gem_create_ext
-
-I915_GEM_CREATE_EXT_MEMORY_REGIONS
-----------------------------------
-Implemented as an extension for gem_create_ext, we would now allow userspace to
-optionally provide an immutable list of preferred placements at creation time,
-in priority order, for a given buffer object. For the placements we expect
-them each to use the class/instance encoding, as per the output of the regions
-query. Having the list in priority order will be useful in the future when
-placing an object, say during eviction.
-
-.. kernel-doc:: include/uapi/drm/i915_drm.h
-        :functions: drm_i915_gem_create_ext_memory_regions
-
-One fair criticism here is that this seems a little over-engineered[2]. If we
-just consider DG1 then yes, a simple gem_create.flags or something is totally
-all that's needed to tell the kernel to allocate the buffer in local-memory or
-whatever. However looking to the future we need uAPI which can also support
-upcoming Xe HP multi-tile architecture in a sane way, where there can be
-multiple local-memory instances for a given device, and so using both class and
-instance in our uAPI to describe regions is desirable, although specifically
-for DG1 it's uninteresting, since we only have a single local-memory instance.
-
-Existing uAPI issues
-====================
-Some potential issues we still need to resolve.
-
-I915 MMAP
----------
-In i915 there are multiple ways to MMAP GEM object, including mapping the same
-object using different mapping types(WC vs WB), i.e multiple active mmaps per
-object. TTM expects one MMAP at most for the lifetime of the object. If it
-turns out that we have to backpedal here, there might be some potential
-userspace fallout.
-
-I915 SET/GET CACHING
---------------------
-In i915 we have set/get_caching ioctl. TTM doesn't let us to change this, but
-DG1 doesn't support non-snooped pcie transactions, so we can just always
-allocate as WB for smem-only buffers. If/when our hw gains support for
-non-snooped pcie transactions then we must fix this mode at allocation time as
-a new GEM extension.
-
-This is related to the mmap problem, because in general (meaning, when we're
-not running on intel cpus) the cpu mmap must not, ever, be inconsistent with
-allocation mode.
-
-Possible idea is to let the kernel picks the mmap mode for userspace from the
-following table:
-
-smem-only: WB. Userspace does not need to call clflush.
-
-smem+lmem: We only ever allow a single mode, so simply allocate this as uncached
-memory, and always give userspace a WC mapping. GPU still does snooped access
-here(assuming we can't turn it off like on DG1), which is a bit inefficient.
-
-lmem only: always WC
-
-This means on discrete you only get a single mmap mode, all others must be
-rejected. That's probably going to be a new default mode or something like
-that.
-
-Links
-=====
-[1] https://patchwork.freedesktop.org/series/86798/
-
-[2] https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5599#note_553791
@@ -17,6 +17,7 @@ Introduction
    busses/index
    i2c-topology
    muxes/i2c-mux-gpio
+   i2c-sysfs

 Writing device drivers
 ======================
@@ -191,19 +191,9 @@ nf_flowtable_tcp_timeout - INTEGER (seconds)
	TCP connections may be offloaded from nf conntrack to nf flow table.
	Once aged, the connection is returned to nf conntrack with tcp pickup timeout.

-nf_flowtable_tcp_pickup - INTEGER (seconds)
-	default 120
-
-	TCP connection timeout after being aged from nf flow table offload.
-
 nf_flowtable_udp_timeout - INTEGER (seconds)
	default 30

	Control offload timeout for udp connections.
	UDP connections may be offloaded from nf conntrack to nf flow table.
	Once aged, the connection is returned to nf conntrack with udp pickup timeout.

-nf_flowtable_udp_pickup - INTEGER (seconds)
-	default 30
-
-	UDP connection timeout after being aged from nf flow table offload.
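For reference, the surviving knobs live under net.netfilter; a minimal user-space sketch of reading one of them (the path is assumed from the standard sysctl-to-procfs mapping):

	/* Sketch: read the remaining nf_flowtable_udp_timeout sysctl. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/net/netfilter/nf_flowtable_udp_timeout", "r");
		int seconds;

		if (!f)
			return 1;
		if (fscanf(f, "%d", &seconds) == 1)
			printf("nf_flowtable_udp_timeout = %d seconds\n", seconds);
		fclose(f);
		return 0;
	}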
@@ -263,7 +263,7 @@ Userspace can also add file descriptors to the notifying process via
 ``ioctl(SECCOMP_IOCTL_NOTIF_ADDFD)``. The ``id`` member of
 ``struct seccomp_notif_addfd`` should be the same ``id`` as in
 ``struct seccomp_notif``. The ``newfd_flags`` flag may be used to set flags
-like O_EXEC on the file descriptor in the notifying process. If the supervisor
+like O_CLOEXEC on the file descriptor in the notifying process. If the supervisor
 wants to inject the file descriptor with a specific number, the
 ``SECCOMP_ADDFD_FLAG_SETFD`` flag can be used, and set the ``newfd`` member to
 the specific number to use. If that file descriptor is already open in the
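A minimal supervisor-side sketch of the ioctl described above, assuming a listener fd already obtained via SECCOMP_FILTER_FLAG_NEW_LISTENER; the helper name is illustrative and error handling is trimmed:

	#include <sys/ioctl.h>
	#include <linux/seccomp.h>
	#include <fcntl.h>

	static int inject_fd(int listener, __u64 id, int srcfd)
	{
		struct seccomp_notif_addfd addfd = {
			.id = id,                 /* same id as in struct seccomp_notif */
			.srcfd = srcfd,           /* fd in the supervisor to copy over */
			.newfd = 0,               /* only used with SECCOMP_ADDFD_FLAG_SETFD */
			.newfd_flags = O_CLOEXEC, /* flags set on the injected fd */
		};

		/* On success, returns the fd number allocated in the notifying process. */
		return ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
	}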
@@ -25,10 +25,10 @@ On x86:

 - vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock

-- kvm->arch.mmu_lock is an rwlock.  kvm->arch.tdp_mmu_pages_lock is
-  taken inside kvm->arch.mmu_lock, and cannot be taken without already
-  holding kvm->arch.mmu_lock (typically with ``read_lock``, otherwise
-  there's no need to take kvm->arch.tdp_mmu_pages_lock at all).
+- kvm->arch.mmu_lock is an rwlock.  kvm->arch.tdp_mmu_pages_lock and
+  kvm->arch.mmu_unsync_pages_lock are taken inside kvm->arch.mmu_lock, and
+  cannot be taken without already holding kvm->arch.mmu_lock (typically with
+  ``read_lock`` for the TDP MMU, thus the need for additional spinlocks).

 Everything else is a leaf: no other lock is taken inside the critical
 sections.
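Schematically, the nesting the updated paragraph documents looks like this (names as in the document; an illustration, not a compilable unit):

	static void example_unsync_walk(struct kvm *kvm)
	{
		read_lock(&kvm->arch.mmu_lock);           /* rwlock, read side (TDP MMU) */
		spin_lock(&kvm->arch.mmu_unsync_pages_lock);
		/* ... mark pages unsync under both locks ... */
		spin_unlock(&kvm->arch.mmu_unsync_pages_lock);
		read_unlock(&kvm->arch.mmu_lock);
	}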
@@ -24,7 +24,7 @@
  */
 static inline __sum16 csum_fold(__wsum s)
 {
-	unsigned r = s << 16 | s >> 16;	/* ror */
+	unsigned int r = s << 16 | s >> 16;	/* ror */
	s = ~s;
	s -= r;
	return s >> 16;
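The rotate-and-subtract sequence above is equivalent to the usual checksum fold (add the two 16-bit halves with end-around carry, then invert); a stand-alone user-space model for checking this:

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t csum_fold_model(uint32_t s)
	{
		uint32_t r = s << 16 | s >> 16;	/* ror by 16 */
		s = ~s;
		s -= r;
		return s >> 16;
	}

	int main(void)
	{
		/* 0x1234 + 0x5678 = 0x68ac; ~0x68ac = 0x9753 */
		printf("0x%04x\n", csum_fold_model(0x12345678u));
		return 0;
	}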
@@ -123,7 +123,7 @@ static const char * const arc_pmu_ev_hw_map[] = {
 #define C(_x)			PERF_COUNT_HW_CACHE_##_x
 #define CACHE_OP_UNSUPPORTED	0xffff

-static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+static const unsigned int arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= PERF_COUNT_ARC_LDC,
@@ -57,23 +57,26 @@ void fpu_save_restore(struct task_struct *prev, struct task_struct *next)

 void fpu_init_task(struct pt_regs *regs)
 {
+	const unsigned int fwe = 0x80000000;

	/* default rounding mode */
	write_aux_reg(ARC_REG_FPU_CTRL, 0x100);

-	/* set "Write enable" to allow explicit write to exception flags */
-	write_aux_reg(ARC_REG_FPU_STATUS, 0x80000000);
+	/* Initialize to zero: setting requires FWE be set */
+	write_aux_reg(ARC_REG_FPU_STATUS, fwe);
 }

 void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
 {
	struct arc_fpu *save = &prev->thread.fpu;
	struct arc_fpu *restore = &next->thread.fpu;
+	const unsigned int fwe = 0x80000000;

	save->ctrl = read_aux_reg(ARC_REG_FPU_CTRL);
	save->status = read_aux_reg(ARC_REG_FPU_STATUS);

	write_aux_reg(ARC_REG_FPU_CTRL, restore->ctrl);
-	write_aux_reg(ARC_REG_FPU_STATUS, restore->status);
+	write_aux_reg(ARC_REG_FPU_STATUS, (fwe | restore->status));
 }

 #endif
@@ -260,7 +260,7 @@ static void init_unwind_hdr(struct unwind_table *table,
 {
	const u8 *ptr;
	unsigned long tableSize = table->size, hdrSize;
-	unsigned n;
+	unsigned int n;
	const u32 *fde;
	struct {
		u8 version;
@@ -462,7 +462,7 @@ static uleb128_t get_uleb128(const u8 **pcur, const u8 *end)
 {
	const u8 *cur = *pcur;
	uleb128_t value;
-	unsigned shift;
+	unsigned int shift;

	for (shift = 0, value = 0; cur < end; shift += 7) {
		if (shift + 7 > 8 * sizeof(value)
@@ -483,7 +483,7 @@ static sleb128_t get_sleb128(const u8 **pcur, const u8 *end)
 {
	const u8 *cur = *pcur;
	sleb128_t value;
-	unsigned shift;
+	unsigned int shift;

	for (shift = 0, value = 0; cur < end; shift += 7) {
		if (shift + 7 > 8 * sizeof(value)
@@ -609,7 +609,7 @@ static unsigned long read_pointer(const u8 **pLoc, const void *end,
 static signed fde_pointer_type(const u32 *cie)
 {
	const u8 *ptr = (const u8 *)(cie + 2);
-	unsigned version = *ptr;
+	unsigned int version = *ptr;

	if (*++ptr) {
		const char *aug;
@@ -904,7 +904,7 @@ int arc_unwind(struct unwind_frame_info *frame)
	const u8 *ptr = NULL, *end = NULL;
	unsigned long pc = UNW_PC(frame) - frame->call_frame;
	unsigned long startLoc = 0, endLoc = 0, cfa;
-	unsigned i;
+	unsigned int i;
	signed ptrType = -1;
	uleb128_t retAddrReg = 0;
	const struct unwind_table *table;
@@ -88,6 +88,8 @@ SECTIONS
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
+		IRQENTRY_TEXT
+		SOFTIRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)
	}
@@ -123,6 +123,10 @@ dtbo-$(CONFIG_ARCH_BCM2835) += \
	merus-amp.dtbo \
	midi-uart0.dtbo \
	midi-uart1.dtbo \
+	midi-uart2.dtbo \
+	midi-uart3.dtbo \
+	midi-uart4.dtbo \
+	midi-uart5.dtbo \
	minipitft13.dtbo \
	miniuart-bt.dtbo \
	mmc.dtbo \
@@ -1390,6 +1390,8 @@ Params: abx80x                  Select one of the ABx80x family:
                                AB0801, AB0803, AB0804, AB0805,
                                AB1801, AB1803, AB1804, AB1805

+        bq32000                 Select the TI BQ32000 device
+
         ds1307                  Select the DS1307 device

         ds1339                  Select the DS1339 device
@@ -1434,6 +1436,9 @@ Params: abx80x                  Select one of the ABx80x family:
                                device must be configured to use the specified
                                address.

+        trickle-diode-disable   Do not use the internal trickle charger diode
+                                (BQ32000 only)
+
         trickle-diode-type      Diode type for trickle charge - "standard" or
                                 "schottky" (ABx80x and RV1805 only)

@@ -1455,6 +1460,8 @@ Params: abx80x                  Select one of the ABx80x family:
                                AB0801, AB0803, AB0804, AB0805,
                                AB1801, AB1803, AB1804, AB1805

+        bq32000                 Select the TI BQ32000 device
+
         ds1307                  Select the DS1307 device

         ds1339                  Select the DS1339 device
@@ -1495,6 +1502,9 @@ Params: abx80x                  Select one of the ABx80x family:
                                device must be configured to use the specified
                                address.

+        trickle-diode-disable   Do not use the internal trickle charger diode
+                                (BQ32000 only)
+
         trickle-diode-type      Diode type for trickle charge - "standard" or
                                 "schottky" (ABx80x and RV1805 only)

@@ -2088,6 +2098,34 @@ Load:   dtoverlay=midi-uart1
 Params: <None>


+Name:   midi-uart2
+Info:   Configures UART2 (ttyAMA1) so that a requested 38.4kbaud actually gets
+        31.25kbaud, the frequency required for MIDI
+Load:   dtoverlay=midi-uart2
+Params: <None>
+
+
+Name:   midi-uart3
+Info:   Configures UART3 (ttyAMA2) so that a requested 38.4kbaud actually gets
+        31.25kbaud, the frequency required for MIDI
+Load:   dtoverlay=midi-uart3
+Params: <None>
+
+
+Name:   midi-uart4
+Info:   Configures UART4 (ttyAMA3) so that a requested 38.4kbaud actually gets
+        31.25kbaud, the frequency required for MIDI
+Load:   dtoverlay=midi-uart4
+Params: <None>
+
+
+Name:   midi-uart5
+Info:   Configures UART5 (ttyAMA4) so that a requested 38.4kbaud actually gets
+        31.25kbaud, the frequency required for MIDI
+Load:   dtoverlay=midi-uart5
+Params: <None>
+
+
 Name:   minipitft13
 Info:   Overlay for AdaFruit Mini Pi 1.3" TFT via SPI using fbtft driver.
 Load:   dtoverlay=minipitft13,<param>=<val>
@@ -256,6 +256,21 @@ s35390a: s35390a@30 {
		};
	};

+	fragment@19 {
+		target = <&i2cbus>;
+		__dormant__ {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			bq32000: bq32000@68 {
+				compatible = "ti,bq32000";
+				trickle-resistor-ohms = <0>;
+				reg = <0x68>;
+			};
+		};
+	};
+
	__overrides__ {
		abx80x = <0>,"+0";
		ds1307 = <0>,"+1";
@@ -276,6 +291,7 @@ __overrides__ {
		pcf85063 = <0>,"+15";
		pcf85063a = <0>,"+16";
		s35390a = <0>,"+18";
+		bq32000 = <0>,"+19";

		addr = <&abx80x>, "reg:0",
		       <&ds1307>, "reg:0",
@@ -288,13 +304,15 @@ __overrides__ {
		       <&m41t62>, "reg:0",
		       <&rv1805>, "reg:0",
		       <&s35390a>, "reg:0";
+		trickle-diode-disable = <&bq32000>,"trickle-diode-disable?";
		trickle-diode-type = <&abx80x>,"abracon,tc-diode",
				     <&rv1805>,"abracon,tc-diode";
		trickle-resistor-ohms = <&ds1339>,"trickle-resistor-ohms:0",
					<&ds1340>,"trickle-resistor-ohms:0",
					<&abx80x>,"abracon,tc-resistor:0",
					<&rv3028>,"trickle-resistor-ohms:0",
-					<&rv1805>,"abracon,tc-resistor:0";
+					<&rv1805>,"abracon,tc-resistor:0",
+					<&bq32000>,"abracon,tc-resistor:0";
		backup-switchover-mode = <&rv3028>,"backup-switchover-mode:0";
		wakeup-source = <&ds1339>,"wakeup-source?",
				<&ds3231>,"wakeup-source?",
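With the additions above, the BQ32000 can presumably be selected like the other RTCs handled by this overlay, e.g. in config.txt:

        dtoverlay=i2c-rtc,bq32000,trickle-diode-disable

(parameter names as introduced in this diff; an untested usage sketch).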
arch/arm/boot/dts/overlays/midi-uart2-overlay.dts (new file, 37 lines)
@@ -0,0 +1,37 @@
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/bcm2835.h>
+
+/*
+ * Fake a higher clock rate to get a larger divisor, and thereby a lower
+ * baudrate. The real clock is 48MHz, which we scale so that requesting
+ * 38.4kHz results in an actual 31.25kHz.
+ *
+ * 48000000*38400/31250 = 58982400
+ */
+
+/{
+	compatible = "brcm,bcm2835";
+
+	fragment@0 {
+		target-path = "/";
+		__overlay__ {
+			midi_clk: midi_clk2 {
+				compatible = "fixed-clock";
+				#clock-cells = <0>;
+				clock-output-names = "uart2_pclk";
+				clock-frequency = <58982400>;
+			};
+		};
+	};
+
+	fragment@1 {
+		target = <&uart2>;
+		__overlay__ {
+			clocks = <&midi_clk>,
+				 <&clocks BCM2835_CLOCK_VPU>;
+		};
+	};
+};
+
arch/arm/boot/dts/overlays/midi-uart3-overlay.dts (new file, 38 lines)
@@ -0,0 +1,38 @@
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/bcm2835.h>
+
+/*
+ * Fake a higher clock rate to get a larger divisor, and thereby a lower
+ * baudrate. The real clock is 48MHz, which we scale so that requesting
+ * 38.4kHz results in an actual 31.25kHz.
+ *
+ * 48000000*38400/31250 = 58982400
+ */
+
+/{
+	compatible = "brcm,bcm2835";
+
+	fragment@0 {
+		target-path = "/";
+		__overlay__ {
+			midi_clk: midi_clk3 {
+				compatible = "fixed-clock";
+				#clock-cells = <0>;
+				clock-output-names = "uart3_pclk";
+				clock-frequency = <58982400>;
+			};
+		};
+	};
+
+	fragment@1 {
+		target = <&uart3>;
+		__overlay__ {
+			clocks = <&midi_clk>,
+				 <&clocks BCM2835_CLOCK_VPU>;
+		};
+	};
+};
+
+
arch/arm/boot/dts/overlays/midi-uart4-overlay.dts (new file, 38 lines)
@@ -0,0 +1,38 @@
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/bcm2835.h>
+
+/*
+ * Fake a higher clock rate to get a larger divisor, and thereby a lower
+ * baudrate. The real clock is 48MHz, which we scale so that requesting
+ * 38.4kHz results in an actual 31.25kHz.
+ *
+ * 48000000*38400/31250 = 58982400
+ */
+
+/{
+	compatible = "brcm,bcm2835";
+
+	fragment@0 {
+		target-path = "/";
+		__overlay__ {
+			midi_clk: midi_clk4 {
+				compatible = "fixed-clock";
+				#clock-cells = <0>;
+				clock-output-names = "uart4_pclk";
+				clock-frequency = <58982400>;
+			};
+		};
+	};
+
+	fragment@1 {
+		target = <&uart4>;
+		__overlay__ {
+			clocks = <&midi_clk>,
+				 <&clocks BCM2835_CLOCK_VPU>;
+		};
+	};
+};
+
+
arch/arm/boot/dts/overlays/midi-uart5-overlay.dts (new file, 38 lines)
@@ -0,0 +1,38 @@
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/bcm2835.h>
+
+/*
+ * Fake a higher clock rate to get a larger divisor, and thereby a lower
+ * baudrate. The real clock is 48MHz, which we scale so that requesting
+ * 38.4kHz results in an actual 31.25kHz.
+ *
+ * 48000000*38400/31250 = 58982400
+ */
+
+/{
+	compatible = "brcm,bcm2835";
+
+	fragment@0 {
+		target-path = "/";
+		__overlay__ {
+			midi_clk: midi_clk5 {
+				compatible = "fixed-clock";
+				#clock-cells = <0>;
+				clock-output-names = "uart5_pclk";
+				clock-frequency = <58982400>;
+			};
+		};
+	};
+
+	fragment@1 {
+		target = <&uart5>;
+		__overlay__ {
+			clocks = <&midi_clk>,
+				 <&clocks BCM2835_CLOCK_VPU>;
+		};
+	};
+};
+
+
@@ -94,10 +94,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
		kvm->arch.return_nisv_io_abort_to_user = true;
		break;
	case KVM_CAP_ARM_MTE:
-		if (!system_supports_mte() || kvm->created_vcpus)
-			return -EINVAL;
-		r = 0;
-		kvm->arch.mte_enabled = true;
+		mutex_lock(&kvm->lock);
+		if (!system_supports_mte() || kvm->created_vcpus) {
+			r = -EINVAL;
+		} else {
+			r = 0;
+			kvm->arch.mte_enabled = true;
+		}
+		mutex_unlock(&kvm->lock);
		break;
	default:
		r = -EINVAL;
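A user-space sketch (assuming the standard KVM ioctl flow) of enabling the capability guarded above; it must run before any vCPU is created, or the kernel now returns -EINVAL under kvm->lock:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int enable_mte(int vm_fd)
	{
		struct kvm_enable_cap cap = { .cap = KVM_CAP_ARM_MTE };

		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);	/* 0 on success */
	}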
@@ -193,7 +193,7 @@ static bool range_is_memory(u64 start, u64 end)
 {
	struct kvm_mem_range r1, r2;

-	if (!find_mem_range(start, &r1) || !find_mem_range(end, &r2))
+	if (!find_mem_range(start, &r1) || !find_mem_range(end - 1, &r2))
		return false;
	if (r1.start != r2.start)
		return false;
@@ -583,6 +583,9 @@ DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

 DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

+/* irq.c */
+DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ);
+
 void __noreturn unrecoverable_exception(struct pt_regs *regs);

 void replay_system_reset(void);
@@ -52,7 +52,7 @@ extern void *mcheckirq_ctx[NR_CPUS];
 extern void *hardirq_ctx[NR_CPUS];
 extern void *softirq_ctx[NR_CPUS];

-extern void do_IRQ(struct pt_regs *regs);
+void __do_IRQ(struct pt_regs *regs);
 extern void __init init_IRQ(void);
 extern void __do_irq(struct pt_regs *regs);

@@ -70,6 +70,22 @@ struct pt_regs
		unsigned long __pad[4];	/* Maintain 16 byte interrupt stack alignment */
	};
 #endif
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+	struct { /* Must be a multiple of 16 bytes */
+		unsigned long mas0;
+		unsigned long mas1;
+		unsigned long mas2;
+		unsigned long mas3;
+		unsigned long mas6;
+		unsigned long mas7;
+		unsigned long srr0;
+		unsigned long srr1;
+		unsigned long csrr0;
+		unsigned long csrr1;
+		unsigned long dsrr0;
+		unsigned long dsrr1;
+	};
+#endif
 };
 #endif

@@ -309,24 +309,21 @@ int main(void)
	STACK_PT_REGS_OFFSET(STACK_REGS_IAMR, iamr);
 #endif

-#if defined(CONFIG_PPC32)
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
-	DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
-	DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+	STACK_PT_REGS_OFFSET(MAS0, mas0);
	/* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */
-	DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
-	DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1));
-	DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2));
-	DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3));
-	DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6));
-	DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7));
-	DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0));
-	DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1));
-	DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0));
-	DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1));
-	DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0));
-	DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
-#endif
+	STACK_PT_REGS_OFFSET(MMUCR, mas0);
+	STACK_PT_REGS_OFFSET(MAS1, mas1);
+	STACK_PT_REGS_OFFSET(MAS2, mas2);
+	STACK_PT_REGS_OFFSET(MAS3, mas3);
+	STACK_PT_REGS_OFFSET(MAS6, mas6);
+	STACK_PT_REGS_OFFSET(MAS7, mas7);
+	STACK_PT_REGS_OFFSET(_SRR0, srr0);
+	STACK_PT_REGS_OFFSET(_SRR1, srr1);
+	STACK_PT_REGS_OFFSET(_CSRR0, csrr0);
+	STACK_PT_REGS_OFFSET(_CSRR1, csrr1);
+	STACK_PT_REGS_OFFSET(_DSRR0, dsrr0);
+	STACK_PT_REGS_OFFSET(_DSRR1, dsrr1);
 #endif

	/* About the CPU features table */
@@ -300,7 +300,7 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1
	prepare_transfer_to_handler
-	lwz	r5, _DSISR(r11)
+	lwz	r5, _DSISR(r1)
	andis.	r0, r5, DSISR_DABRMATCH@h
	bne-	1f
	bl	do_page_fault
@@ -168,20 +168,18 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
 /* only on e500mc */
 #define DBG_STACK_BASE		dbgirq_ctx

-#define EXC_LVL_FRAME_OVERHEAD	(THREAD_SIZE - INT_FRAME_SIZE - EXC_LVL_SIZE)
-
 #ifdef CONFIG_SMP
 #define BOOKE_LOAD_EXC_LEVEL_STACK(level)	\
	mfspr	r8,SPRN_PIR;			\
	slwi	r8,r8,2;			\
	addis	r8,r8,level##_STACK_BASE@ha;	\
	lwz	r8,level##_STACK_BASE@l(r8);	\
-	addi	r8,r8,EXC_LVL_FRAME_OVERHEAD;
+	addi	r8,r8,THREAD_SIZE - INT_FRAME_SIZE;
 #else
 #define BOOKE_LOAD_EXC_LEVEL_STACK(level)	\
	lis	r8,level##_STACK_BASE@ha;	\
	lwz	r8,level##_STACK_BASE@l(r8);	\
-	addi	r8,r8,EXC_LVL_FRAME_OVERHEAD;
+	addi	r8,r8,THREAD_SIZE - INT_FRAME_SIZE;
 #endif

 /*
|
|||||||
mtmsr r11; \
|
mtmsr r11; \
|
||||||
mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\
|
mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\
|
||||||
lwz r11, TASK_STACK - THREAD(r11); /* this thread's kernel stack */\
|
lwz r11, TASK_STACK - THREAD(r11); /* this thread's kernel stack */\
|
||||||
addi r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame */\
|
addi r11,r11,THREAD_SIZE - INT_FRAME_SIZE; /* allocate stack frame */\
|
||||||
beq 1f; \
|
beq 1f; \
|
||||||
/* COMING FROM USER MODE */ \
|
/* COMING FROM USER MODE */ \
|
||||||
stw r9,_CCR(r11); /* save CR */\
|
stw r9,_CCR(r11); /* save CR */\
|
||||||
@@ -516,24 +514,5 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
	bl	kernel_fp_unavailable_exception;	\
	b	interrupt_return

-#else /* __ASSEMBLY__ */
-struct exception_regs {
-	unsigned long mas0;
-	unsigned long mas1;
-	unsigned long mas2;
-	unsigned long mas3;
-	unsigned long mas6;
-	unsigned long mas7;
-	unsigned long srr0;
-	unsigned long srr1;
-	unsigned long csrr0;
-	unsigned long csrr1;
-	unsigned long dsrr0;
-	unsigned long dsrr1;
-};
-
-/* ensure this structure is always sized to a multiple of the stack alignment */
-#define STACK_EXC_LVL_FRAME_SIZE	ALIGN(sizeof (struct exception_regs), 16)
-
 #endif /* __ASSEMBLY__ */
 #endif /* __HEAD_BOOKE_H__ */
@@ -750,7 +750,7 @@ void __do_irq(struct pt_regs *regs)
	trace_irq_exit(regs);
 }

-DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
+void __do_IRQ(struct pt_regs *regs)
 {
	struct pt_regs *old_regs = set_irq_regs(regs);
	void *cursp, *irqsp, *sirqsp;
@@ -774,6 +774,11 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
	set_irq_regs(old_regs);
 }

+DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
+{
+	__do_IRQ(regs);
+}
+
 static void *__init alloc_vm_stack(void)
 {
	return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,
@@ -292,7 +292,8 @@ int kprobe_handler(struct pt_regs *regs)
	if (user_mode(regs))
		return 0;

-	if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR))
+	if (!IS_ENABLED(CONFIG_BOOKE) &&
+	    (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)))
		return 0;

	/*
@@ -1167,7 +1167,7 @@ static int __init topology_init(void)
		 * CPU. For instance, the boot cpu might never be valid
		 * for hotplugging.
		 */
-		if (smp_ops->cpu_offline_self)
+		if (smp_ops && smp_ops->cpu_offline_self)
			c->hotpluggable = 1;
 #endif

@@ -586,7 +586,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)

 #if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
-		do_IRQ(regs);
+		__do_IRQ(regs);
 #endif

	old_regs = set_irq_regs(regs);
@@ -1104,7 +1104,7 @@ DEFINE_INTERRUPT_HANDLER(RunModeException)
	_exception(SIGTRAP, regs, TRAP_UNK, 0);
 }

-DEFINE_INTERRUPT_HANDLER(single_step_exception)
+static void __single_step_exception(struct pt_regs *regs)
 {
	clear_single_step(regs);
	clear_br_trace(regs);
@@ -1121,6 +1121,11 @@ DEFINE_INTERRUPT_HANDLER(single_step_exception)
	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
 }

+DEFINE_INTERRUPT_HANDLER(single_step_exception)
+{
+	__single_step_exception(regs);
+}
+
 /*
  * After we have successfully emulated an instruction, we have to
  * check if the instruction was being single-stepped, and if so,
@@ -1130,7 +1135,7 @@ DEFINE_INTERRUPT_HANDLER(single_step_exception)
 static void emulate_single_step(struct pt_regs *regs)
 {
	if (single_stepping(regs))
-		single_step_exception(regs);
+		__single_step_exception(regs);
 }

 static inline int __parse_fpscr(unsigned long fpscr)
@@ -539,9 +539,10 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
	 * H_CPU_BEHAV_FAVOUR_SECURITY_H could be set only if
	 * H_CPU_BEHAV_FAVOUR_SECURITY is.
	 */
-	if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))
+	if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)) {
		security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
-	else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
+		pseries_security_flavor = 0;
+	} else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
		pseries_security_flavor = 1;
	else
		pseries_security_flavor = 2;
@@ -67,6 +67,7 @@ static struct irq_domain *xive_irq_domain;
 static struct xive_ipi_desc {
	unsigned int irq;
	char name[16];
+	atomic_t started;
 } *xive_ipis;

 /*
@@ -1120,7 +1121,7 @@ static const struct irq_domain_ops xive_ipi_irq_domain_ops = {
	.alloc  = xive_ipi_irq_domain_alloc,
 };

-static int __init xive_request_ipi(void)
+static int __init xive_init_ipis(void)
 {
	struct fwnode_handle *fwnode;
	struct irq_domain *ipi_domain;
@@ -1144,10 +1145,6 @@ static int __init xive_request_ipi(void)
		struct xive_ipi_desc *xid = &xive_ipis[node];
		struct xive_ipi_alloc_info info = { node };

-		/* Skip nodes without CPUs */
-		if (cpumask_empty(cpumask_of_node(node)))
-			continue;
-
		/*
		 * Map one IPI interrupt per node for all cpus of that node.
		 * Since the HW interrupt number doesn't have any meaning,
@@ -1159,11 +1156,6 @@
		xid->irq = ret;

		snprintf(xid->name, sizeof(xid->name), "IPI-%d", node);
-
-		ret = request_irq(xid->irq, xive_muxed_ipi_action,
-				  IRQF_PERCPU | IRQF_NO_THREAD, xid->name, NULL);
-
-		WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
	}

	return ret;
@@ -1178,6 +1170,22 @@
	return ret;
 }

+static int __init xive_request_ipi(unsigned int cpu)
+{
+	struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)];
+	int ret;
+
+	if (atomic_inc_return(&xid->started) > 1)
+		return 0;
+
+	ret = request_irq(xid->irq, xive_muxed_ipi_action,
+			  IRQF_PERCPU | IRQF_NO_THREAD,
+			  xid->name, NULL);
+
+	WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
+	return ret;
+}
+
 static int xive_setup_cpu_ipi(unsigned int cpu)
 {
	unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
@@ -1192,6 +1200,9 @@ static int xive_setup_cpu_ipi(unsigned int cpu)
	if (xc->hw_ipi != XIVE_BAD_IRQ)
		return 0;

+	/* Register the IPI */
+	xive_request_ipi(cpu);
+
	/* Grab an IPI from the backend, this will populate xc->hw_ipi */
	if (xive_ops->get_ipi(cpu, xc))
		return -EIO;
@@ -1231,6 +1242,8 @@ static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
	if (xc->hw_ipi == XIVE_BAD_IRQ)
		return;

+	/* TODO: clear IPI mapping */
+
	/* Mask the IPI */
	xive_do_source_set_mask(&xc->ipi_data, true);

@@ -1253,7 +1266,7 @@ void __init xive_smp_probe(void)
	smp_ops->cause_ipi = xive_cause_ipi;

	/* Register the IPI */
-	xive_request_ipi();
+	xive_init_ipis();

	/* Allocate and setup IPI for the boot CPU */
	xive_setup_cpu_ipi(smp_processor_id());
@@ -11,7 +11,7 @@ endif
 CFLAGS_syscall_table.o	+= $(call cc-option,-Wno-override-init,)

 ifdef CONFIG_KEXEC
-AFLAGS_kexec_relocate.o := -mcmodel=medany -mno-relax
+AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)
 endif

 extra-y += head.o
@@ -197,7 +197,7 @@ static void __init setup_bootmem(void)
	 * if end of dram is equal to maximum addressable memory.  For 64-bit
	 * kernel, this problem can't happen here as the end of the virtual
	 * address space is occupied by the kernel mapping then this check must
-	 * be done in create_kernel_page_table.
+	 * be done as soon as the kernel mapping base address is determined.
	 */
	max_mapped_addr = __pa(~(ulong)0);
	if (max_mapped_addr == (phys_ram_end - 1))
@@ -1038,6 +1038,13 @@
	struct list_head lpage_disallowed_mmu_pages;
	struct kvm_page_track_notifier_node mmu_sp_tracker;
	struct kvm_page_track_notifier_head track_notifier_head;
+	/*
+	 * Protects marking pages unsync during page faults, as TDP MMU page
+	 * faults only take mmu_lock for read.  For simplicity, the unsync
+	 * pages lock is always taken when marking pages unsync regardless of
+	 * whether mmu_lock is held for read or write.
+	 */
+	spinlock_t mmu_unsync_pages_lock;

	struct list_head assigned_dev_head;
	struct iommu_domain *iommu_domain;
@@ -1986,7 +1986,8 @@ static struct irq_chip ioapic_chip __read_mostly = {
	.irq_set_affinity	= ioapic_set_affinity,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_get_irqchip_state	= ioapic_irq_get_chip_state,
-	.flags			= IRQCHIP_SKIP_SET_WAKE,
+	.flags			= IRQCHIP_SKIP_SET_WAKE |
+				  IRQCHIP_AFFINITY_PRE_STARTUP,
 };

 static struct irq_chip ioapic_ir_chip __read_mostly = {
@@ -1999,7 +2000,8 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
	.irq_set_affinity	= ioapic_set_affinity,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_get_irqchip_state	= ioapic_irq_get_chip_state,
-	.flags			= IRQCHIP_SKIP_SET_WAKE,
+	.flags			= IRQCHIP_SKIP_SET_WAKE |
+				  IRQCHIP_AFFINITY_PRE_STARTUP,
 };

 static inline void init_IO_APIC_traps(void)
@@ -58,11 +58,13 @@ msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
	 *   The quirk bit is not set in this case.
	 * - The new vector is the same as the old vector
	 * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
+	 * - The interrupt is not yet started up
	 * - The new destination CPU is the same as the old destination CPU
	 */
	if (!irqd_msi_nomask_quirk(irqd) ||
	    cfg->vector == old_cfg.vector ||
	    old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
+	    !irqd_is_started(irqd) ||
	    cfg->dest_apicid == old_cfg.dest_apicid) {
		irq_msi_update_msg(irqd, cfg);
		return ret;
@@ -150,7 +152,8 @@ static struct irq_chip pci_msi_controller = {
	.irq_ack		= irq_chip_ack_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_affinity	= msi_set_affinity,
-	.flags			= IRQCHIP_SKIP_SET_WAKE,
+	.flags			= IRQCHIP_SKIP_SET_WAKE |
+				  IRQCHIP_AFFINITY_PRE_STARTUP,
 };

 int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
@@ -219,7 +222,8 @@ static struct irq_chip pci_msi_ir_controller = {
	.irq_mask		= pci_msi_mask_irq,
	.irq_ack		= irq_chip_ack_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
-	.flags			= IRQCHIP_SKIP_SET_WAKE,
+	.flags			= IRQCHIP_SKIP_SET_WAKE |
+				  IRQCHIP_AFFINITY_PRE_STARTUP,
 };

 static struct msi_domain_info pci_msi_ir_domain_info = {
@@ -273,7 +277,8 @@ static struct irq_chip dmar_msi_controller = {
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_compose_msi_msg	= dmar_msi_compose_msg,
	.irq_write_msi_msg	= dmar_msi_write_msg,
-	.flags			= IRQCHIP_SKIP_SET_WAKE,
+	.flags			= IRQCHIP_SKIP_SET_WAKE |
+				  IRQCHIP_AFFINITY_PRE_STARTUP,
 };

 static int dmar_msi_init(struct irq_domain *domain,
@@ -285,15 +285,14 @@ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
	return chunks >>= shift;
 }

-static int __mon_event_count(u32 rmid, struct rmid_read *rr)
+static u64 __mon_event_count(u32 rmid, struct rmid_read *rr)
 {
	struct mbm_state *m;
	u64 chunks, tval;

	tval = __rmid_read(rmid, rr->evtid);
	if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) {
-		rr->val = tval;
-		return -EINVAL;
+		return tval;
	}
	switch (rr->evtid) {
	case QOS_L3_OCCUP_EVENT_ID:
@@ -305,12 +304,6 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
	case QOS_L3_MBM_LOCAL_EVENT_ID:
		m = &rr->d->mbm_local[rmid];
		break;
-	default:
-		/*
-		 * Code would never reach here because
-		 * an invalid event id would fail the __rmid_read.
-		 */
-		return -EINVAL;
	}

	if (rr->first) {
@@ -361,23 +354,29 @@ void mon_event_count(void *info)
	struct rdtgroup *rdtgrp, *entry;
	struct rmid_read *rr = info;
	struct list_head *head;
+	u64 ret_val;

	rdtgrp = rr->rgrp;

-	if (__mon_event_count(rdtgrp->mon.rmid, rr))
-		return;
+	ret_val = __mon_event_count(rdtgrp->mon.rmid, rr);

	/*
-	 * For Ctrl groups read data from child monitor groups.
+	 * For Ctrl groups read data from child monitor groups and
+	 * add them together. Count events which are read successfully.
+	 * Discard the rmid_read's reporting errors.
	 */
	head = &rdtgrp->mon.crdtgrp_list;

	if (rdtgrp->type == RDTCTRL_GROUP) {
		list_for_each_entry(entry, head, mon.crdtgrp_list) {
-			if (__mon_event_count(entry->mon.rmid, rr))
-				return;
+			if (__mon_event_count(entry->mon.rmid, rr) == 0)
+				ret_val = 0;
		}
	}

+	/* Report error if none of rmid_reads are successful */
+	if (ret_val)
+		rr->val = ret_val;
 }

 /*
@ -508,7 +508,7 @@ static struct irq_chip hpet_msi_controller __ro_after_init = {
|
|||||||
.irq_set_affinity = msi_domain_set_affinity,
|
.irq_set_affinity = msi_domain_set_affinity,
|
||||||
.irq_retrigger = irq_chip_retrigger_hierarchy,
|
.irq_retrigger = irq_chip_retrigger_hierarchy,
|
||||||
.irq_write_msi_msg = hpet_msi_write_msg,
|
.irq_write_msi_msg = hpet_msi_write_msg,
|
||||||
.flags = IRQCHIP_SKIP_SET_WAKE,
|
.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP,
|
||||||
};
|
};
|
||||||
|
|
||||||
static int hpet_msi_init(struct irq_domain *domain,
|
static int hpet_msi_init(struct irq_domain *domain,
|
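
The resctrl hunks above make __mon_event_count() hand back the raw 64-bit error value and teach mon_event_count() to report an error only when every sub-read fails. A rough standalone C sketch of that aggregation idea (illustrative only; read_one(), VAL_ERROR and the values are hypothetical, not the kernel API):

	#include <stdint.h>
	#include <stdio.h>

	#define VAL_ERROR (1ULL << 63)	/* stand-in for an RMID_VAL_ERROR-style flag */

	/* Hypothetical per-group read: 0 on success, flag bits on failure. */
	static uint64_t read_one(int id)
	{
		return (id & 1) ? VAL_ERROR : 0;	/* pretend odd ids fail */
	}

	/* Keep the first error, but clear it as soon as any read succeeds. */
	static uint64_t read_group(const int *ids, int n)
	{
		uint64_t ret_val = read_one(ids[0]);
		int i;

		for (i = 1; i < n; i++)
			if (read_one(ids[i]) == 0)
				ret_val = 0;
		return ret_val;
	}

	int main(void)
	{
		int ids[] = { 1, 3, 4 };

		printf("result: 0x%llx\n", (unsigned long long)read_group(ids, 3));
		return 0;
	}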
@@ -208,30 +208,6 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	kvm_mmu_after_set_cpuid(vcpu);
 }
 
-static int is_efer_nx(void)
-{
-	return host_efer & EFER_NX;
-}
-
-static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
-{
-	int i;
-	struct kvm_cpuid_entry2 *e, *entry;
-
-	entry = NULL;
-	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
-		e = &vcpu->arch.cpuid_entries[i];
-		if (e->function == 0x80000001) {
-			entry = e;
-			break;
-		}
-	}
-	if (entry && cpuid_entry_has(entry, X86_FEATURE_NX) && !is_efer_nx()) {
-		cpuid_entry_clear(entry, X86_FEATURE_NX);
-		printk(KERN_INFO "kvm: guest NX capability removed\n");
-	}
-}
-
 int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
@@ -302,7 +278,6 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
 	vcpu->arch.cpuid_entries = e2;
 	vcpu->arch.cpuid_nent = cpuid->nent;
 
-	cpuid_fix_nx_cap(vcpu);
 	kvm_update_cpuid_runtime(vcpu);
 	kvm_vcpu_after_set_cpuid(vcpu);
 
@@ -401,7 +376,6 @@ static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
 
 void kvm_set_cpu_caps(void)
 {
-	unsigned int f_nx = is_efer_nx() ? F(NX) : 0;
 #ifdef CONFIG_X86_64
 	unsigned int f_gbpages = F(GBPAGES);
 	unsigned int f_lm = F(LM);
@@ -515,7 +489,7 @@ void kvm_set_cpu_caps(void)
 		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
 		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
 		F(PAT) | F(PSE36) | 0 /* Reserved */ |
-		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
+		F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
 		F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
 		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
 	);
@@ -1933,7 +1933,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
 void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *entry;
-	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+	struct kvm_vcpu_hv *hv_vcpu;
 
 	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0);
 	if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) {
@@ -2535,6 +2535,7 @@ static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
 {
 	struct kvm_mmu_page *sp;
+	bool locked = false;
 
 	/*
 	 * Force write-protection if the page is being tracked. Note, the page
@@ -2557,9 +2558,34 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
 		if (sp->unsync)
 			continue;
 
+		/*
+		 * TDP MMU page faults require an additional spinlock as they
+		 * run with mmu_lock held for read, not write, and the unsync
+		 * logic is not thread safe. Take the spinlock regardless of
+		 * the MMU type to avoid extra conditionals/parameters, there's
+		 * no meaningful penalty if mmu_lock is held for write.
+		 */
+		if (!locked) {
+			locked = true;
+			spin_lock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
+
+			/*
+			 * Recheck after taking the spinlock, a different vCPU
+			 * may have since marked the page unsync. A false
+			 * positive on the unprotected check above is not
+			 * possible as clearing sp->unsync _must_ hold mmu_lock
+			 * for write, i.e. unsync cannot transition from 0->1
+			 * while this CPU holds mmu_lock for read (or write).
+			 */
+			if (READ_ONCE(sp->unsync))
+				continue;
+		}
+
 		WARN_ON(sp->role.level != PG_LEVEL_4K);
 		kvm_unsync_page(vcpu, sp);
 	}
+	if (locked)
+		spin_unlock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
 
 	/*
 	 * We need to ensure that the marking of unsync pages is visible
@@ -5537,6 +5563,8 @@ void kvm_mmu_init_vm(struct kvm *kvm)
 {
 	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
 
+	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
+
 	if (!kvm_mmu_init_tdp_mmu(kvm))
 		/*
 		 * No smp_load/store wrappers needed here as we are in
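
The mmu_try_to_unsync_pages() hunk above is a double-checked locking shape: a lock-free check, then a recheck under a spinlock taken lazily by the first potential writer. A minimal userspace sketch of the same shape, assuming C11 atomics and pthreads (not the KVM code itself):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static atomic_bool unsync;

	/* Lock-free fast path, with a recheck under the lock before writing. */
	static void mark_unsync_once(void)
	{
		if (atomic_load_explicit(&unsync, memory_order_acquire))
			return;		/* already marked, skip the lock */

		pthread_mutex_lock(&lock);
		/* Recheck: another thread may have won the race meanwhile. */
		if (!atomic_load_explicit(&unsync, memory_order_relaxed))
			atomic_store_explicit(&unsync, true, memory_order_release);
		pthread_mutex_unlock(&lock);
	}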
@@ -43,6 +43,7 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
 	if (!kvm->arch.tdp_mmu_enabled)
 		return;
 
+	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
 
 	/*
@@ -81,8 +82,6 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
 			  bool shared)
 {
-	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
-
 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
 
 	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
@@ -94,7 +93,7 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
 	list_del_rcu(&root->link);
 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
 
-	zap_gfn_range(kvm, root, 0, max_gfn, false, false, shared);
+	zap_gfn_range(kvm, root, 0, -1ull, false, false, shared);
 
 	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
 }
@@ -724,13 +723,29 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 			  gfn_t start, gfn_t end, bool can_yield, bool flush,
 			  bool shared)
 {
+	gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
+	bool zap_all = (start == 0 && end >= max_gfn_host);
 	struct tdp_iter iter;
 
+	/*
+	 * No need to try to step down in the iterator when zapping all SPTEs,
+	 * zapping the top-level non-leaf SPTEs will recurse on their children.
+	 */
+	int min_level = zap_all ? root->role.level : PG_LEVEL_4K;
+
+	/*
+	 * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will
+	 * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
+	 * and so KVM will never install a SPTE for such addresses.
+	 */
+	end = min(end, max_gfn_host);
+
 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
 
 	rcu_read_lock();
 
-	tdp_root_for_each_pte(iter, root, start, end) {
+	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
+				   min_level, start, end) {
retry:
 		if (can_yield &&
 		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
@@ -744,9 +759,10 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 		/*
 		 * If this is a non-last-level SPTE that covers a larger range
		 * than should be zapped, continue, and zap the mappings at a
-		 * lower level.
+		 * lower level, except when zapping all SPTEs.
 		 */
-		if ((iter.gfn < start ||
+		if (!zap_all &&
+		    (iter.gfn < start ||
 		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
 		    !is_last_spte(iter.old_spte, iter.level))
 			continue;
@@ -794,12 +810,11 @@ bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
 
 void kvm_tdp_mmu_zap_all(struct kvm *kvm)
 {
-	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
 	bool flush = false;
 	int i;
 
 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
-		flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn,
+		flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, -1ull,
 						  flush, false);
 
 	if (flush)
@@ -838,7 +853,6 @@ static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
  */
 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
 {
-	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
 	struct kvm_mmu_page *next_root;
 	struct kvm_mmu_page *root;
 	bool flush = false;
@@ -854,8 +868,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
 
 		rcu_read_unlock();
 
-		flush = zap_gfn_range(kvm, root, 0, max_gfn, true, flush,
-				      true);
+		flush = zap_gfn_range(kvm, root, 0, -1ull, true, flush, true);
 
 		/*
 		 * Put the reference acquired in
@@ -330,6 +330,31 @@ void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
 	vcpu_put(vcpu);
 }
 
+#define EPTP_PA_MASK	GENMASK_ULL(51, 12)
+
+static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
+{
+	return VALID_PAGE(root_hpa) &&
+	       ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
+}
+
+static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
+				       gpa_t addr)
+{
+	uint i;
+	struct kvm_mmu_root_info *cached_root;
+
+	WARN_ON_ONCE(!mmu_is_nested(vcpu));
+
+	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
+		cached_root = &vcpu->arch.mmu->prev_roots[i];
+
+		if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd,
+					    eptp))
+			vcpu->arch.mmu->invlpg(vcpu, addr, cached_root->hpa);
+	}
+}
+
 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
 					 struct x86_exception *fault)
 {
@@ -342,10 +367,22 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
 		vm_exit_reason = EXIT_REASON_PML_FULL;
 		vmx->nested.pml_full = false;
 		exit_qualification &= INTR_INFO_UNBLOCK_NMI;
-	} else if (fault->error_code & PFERR_RSVD_MASK)
-		vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
-	else
-		vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
+	} else {
+		if (fault->error_code & PFERR_RSVD_MASK)
+			vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
+		else
+			vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
+
+		/*
+		 * Although the caller (kvm_inject_emulated_page_fault) would
+		 * have already synced the faulting address in the shadow EPT
+		 * tables for the current EPTP12, we also need to sync it for
+		 * any other cached EPTP02s based on the same EP4TA, since the
+		 * TLB associates mappings to the EP4TA rather than the full EPTP.
+		 */
+		nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer,
+					   fault->address);
+	}
 
 	nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification);
 	vmcs12->guest_physical_address = fault->address;
@@ -5325,14 +5362,6 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
 	return nested_vmx_succeed(vcpu);
 }
 
-#define EPTP_PA_MASK	GENMASK_ULL(51, 12)
-
-static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
-{
-	return VALID_PAGE(root_hpa) &&
-	       ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
-}
-
 /* Emulate the INVEPT instruction */
 static int handle_invept(struct kvm_vcpu *vcpu)
 {
@@ -5826,7 +5855,8 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
 	if (is_nmi(intr_info))
 		return true;
 	else if (is_page_fault(intr_info))
-		return vcpu->arch.apf.host_apf_flags || !enable_ept;
+		return vcpu->arch.apf.host_apf_flags ||
+		       vmx_need_pf_intercept(vcpu);
 	else if (is_debug(intr_info) &&
 		 vcpu->guest_debug &
 		 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
@@ -522,7 +522,7 @@ static inline struct vmcs *alloc_vmcs(bool shadow)
 
 static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
 {
-	return vmx->secondary_exec_control &
+	return secondary_exec_controls_get(vmx) &
 		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
 }
 
@@ -10,6 +10,7 @@ BEGIN {
 
 /^GNU objdump/ {
 	verstr = ""
+	gsub(/\(.*\)/, "");
 	for (i = 3; i <= NF; i++)
 		if (match($(i), "^[0-9]")) {
 			verstr = $(i);
@@ -9,12 +9,6 @@ config MQ_IOSCHED_DEADLINE
 	help
 	  MQ version of the deadline IO scheduler.
 
-config MQ_IOSCHED_DEADLINE_CGROUP
-	tristate
-	default y
-	depends on MQ_IOSCHED_DEADLINE
-	depends on BLK_CGROUP
-
 config MQ_IOSCHED_KYBER
 	tristate "Kyber I/O scheduler"
 	default y
@@ -22,8 +22,6 @@ obj-$(CONFIG_BLK_CGROUP_IOPRIO)	+= blk-ioprio.o
 obj-$(CONFIG_BLK_CGROUP_IOLATENCY)	+= blk-iolatency.o
 obj-$(CONFIG_BLK_CGROUP_IOCOST)	+= blk-iocost.o
 obj-$(CONFIG_MQ_IOSCHED_DEADLINE)	+= mq-deadline.o
-mq-deadline-y += mq-deadline-main.o
-mq-deadline-$(CONFIG_MQ_IOSCHED_DEADLINE_CGROUP)+= mq-deadline-cgroup.o
 obj-$(CONFIG_MQ_IOSCHED_KYBER)	+= kyber-iosched.o
 bfq-y				:= bfq-iosched.o bfq-wf2q.o bfq-cgroup.o
 obj-$(CONFIG_IOSCHED_BFQ)	+= bfq.o
@@ -790,6 +790,7 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
 		struct blkcg_gq *parent = blkg->parent;
 		struct blkg_iostat_set *bisc = per_cpu_ptr(blkg->iostat_cpu, cpu);
 		struct blkg_iostat cur, delta;
+		unsigned long flags;
 		unsigned int seq;
 
 		/* fetch the current per-cpu values */
@@ -799,21 +800,21 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
 		} while (u64_stats_fetch_retry(&bisc->sync, seq));
 
 		/* propagate percpu delta to global */
-		u64_stats_update_begin(&blkg->iostat.sync);
+		flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
 		blkg_iostat_set(&delta, &cur);
 		blkg_iostat_sub(&delta, &bisc->last);
 		blkg_iostat_add(&blkg->iostat.cur, &delta);
 		blkg_iostat_add(&bisc->last, &delta);
-		u64_stats_update_end(&blkg->iostat.sync);
+		u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
 
 		/* propagate global delta to parent (unless that's root) */
 		if (parent && parent->parent) {
-			u64_stats_update_begin(&parent->iostat.sync);
+			flags = u64_stats_update_begin_irqsave(&parent->iostat.sync);
 			blkg_iostat_set(&delta, &blkg->iostat.cur);
 			blkg_iostat_sub(&delta, &blkg->iostat.last);
 			blkg_iostat_add(&parent->iostat.cur, &delta);
 			blkg_iostat_add(&blkg->iostat.last, &delta);
-			u64_stats_update_end(&parent->iostat.sync);
+			u64_stats_update_end_irqrestore(&parent->iostat.sync, flags);
 		}
 	}
 
@@ -848,6 +849,7 @@ static void blkcg_fill_root_iostats(void)
 		memset(&tmp, 0, sizeof(tmp));
 		for_each_possible_cpu(cpu) {
 			struct disk_stats *cpu_dkstats;
+			unsigned long flags;
 
 			cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
 			tmp.ios[BLKG_IOSTAT_READ] +=
@@ -864,9 +866,9 @@ static void blkcg_fill_root_iostats(void)
 			tmp.bytes[BLKG_IOSTAT_DISCARD] +=
 				cpu_dkstats->sectors[STAT_DISCARD] << 9;
 
-			u64_stats_update_begin(&blkg->iostat.sync);
+			flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
 			blkg_iostat_set(&blkg->iostat.cur, &tmp);
-			u64_stats_update_end(&blkg->iostat.sync);
+			u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
 		}
 	}
 }
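
The blk-cgroup hunks above switch the writer side of the per-blkg stats to u64_stats_update_begin_irqsave()/..._end_irqrestore(): the sequence-counter write section additionally runs with IRQs off, so an interrupt on the same CPU cannot start a nested write. A minimal userspace seqcount sketch of the reader/writer protocol being protected (C11 atomics; the IRQ-disable part has no userspace equivalent and is only noted in a comment):

	#include <stdatomic.h>
	#include <stdint.h>

	struct stat64 {
		atomic_uint seq;	/* odd = write in progress */
		uint64_t bytes;
	};

	static void writer_update(struct stat64 *s, uint64_t delta)
	{
		/* In the kernel, the irqsave variant also disables interrupts here. */
		atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
		s->bytes += delta;
		atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
	}

	static uint64_t reader_fetch(struct stat64 *s)
	{
		unsigned int seq;
		uint64_t v;

		do {
			while ((seq = atomic_load_explicit(&s->seq,
							   memory_order_acquire)) & 1)
				;	/* writer active, retry */
			v = s->bytes;
		} while (atomic_load_explicit(&s->seq, memory_order_acquire) != seq);
		return v;
	}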
@@ -2994,10 +2994,12 @@ static void queue_set_hctx_shared(struct request_queue *q, bool shared)
 	int i;
 
 	queue_for_each_hw_ctx(q, hctx, i) {
-		if (shared)
+		if (shared) {
 			hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
-		else
+		} else {
+			blk_mq_tag_idle(hctx);
 			hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
+		}
 	}
 }
 
[File diff suppressed because it is too large]
@@ -3021,6 +3021,9 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
 		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
 		struct nd_mapping_desc *mapping;
 
+		/* range index 0 == unmapped in SPA or invalid-SPA */
+		if (memdev->range_index == 0 || spa->range_index == 0)
+			continue;
 		if (memdev->range_index != spa->range_index)
 			continue;
 		if (count >= ND_MAX_MAPPINGS) {
@@ -2837,6 +2837,7 @@ void device_initialize(struct device *dev)
 	device_pm_init(dev);
 	set_dev_node(dev, -1);
 #ifdef CONFIG_GENERIC_MSI_IRQ
+	raw_spin_lock_init(&dev->msi_lock);
 	INIT_LIST_HEAD(&dev->msi_list);
 #endif
 	INIT_LIST_HEAD(&dev->links.consumers);
@@ -239,8 +239,8 @@ static void nbd_dev_remove(struct nbd_device *nbd)
 
 	if (disk) {
 		del_gendisk(disk);
-		blk_mq_free_tag_set(&nbd->tag_set);
 		blk_cleanup_disk(disk);
+		blk_mq_free_tag_set(&nbd->tag_set);
 	}
 
 	/*
@@ -818,6 +818,10 @@ static bool nbd_clear_req(struct request *req, void *data, bool reserved)
 {
 	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
 
+	/* don't abort one completed request */
+	if (blk_mq_request_completed(req))
+		return true;
+
 	mutex_lock(&cmd->lock);
 	cmd->status = BLK_STS_IOERR;
 	mutex_unlock(&cmd->lock);
@@ -2004,15 +2008,19 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
 {
 	mutex_lock(&nbd->config_lock);
 	nbd_disconnect(nbd);
-	nbd_clear_sock(nbd);
-	mutex_unlock(&nbd->config_lock);
+	sock_shutdown(nbd);
 	/*
 	 * Make sure recv thread has finished, so it does not drop the last
 	 * config ref and try to destroy the workqueue from inside the work
-	 * queue.
+	 * queue. And this also ensure that we can safely call nbd_clear_que()
+	 * to cancel the inflight I/Os.
 	 */
 	if (nbd->recv_workq)
 		flush_workqueue(nbd->recv_workq);
+	nbd_clear_que(nbd);
+	nbd->task_setup = NULL;
+	mutex_unlock(&nbd->config_lock);
 
 	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
 			       &nbd->config->runtime_flags))
 		nbd_config_put(nbd);
@@ -313,7 +313,7 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
 		return -ENXIO;
 
 	if (nr_pages < 0)
-		return nr_pages;
+		return -EINVAL;
 
 	avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
 			kaddr, pfn);
@@ -35,15 +35,48 @@ efi_status_t check_platform_features(void)
 }
 
 /*
- * Although relocatable kernels can fix up the misalignment with respect to
- * MIN_KIMG_ALIGN, the resulting virtual text addresses are subtly out of
- * sync with those recorded in the vmlinux when kaslr is disabled but the
- * image required relocation anyway. Therefore retain 2M alignment unless
- * KASLR is in use.
+ * Distro versions of GRUB may ignore the BSS allocation entirely (i.e., fail
+ * to provide space, and fail to zero it). Check for this condition by double
+ * checking that the first and the last byte of the image are covered by the
+ * same EFI memory map entry.
  */
-static u64 min_kimg_align(void)
+static bool check_image_region(u64 base, u64 size)
 {
-	return efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN;
+	unsigned long map_size, desc_size, buff_size;
+	efi_memory_desc_t *memory_map;
+	struct efi_boot_memmap map;
+	efi_status_t status;
+	bool ret = false;
+	int map_offset;
+
+	map.map = &memory_map;
+	map.map_size = &map_size;
+	map.desc_size = &desc_size;
+	map.desc_ver = NULL;
+	map.key_ptr = NULL;
+	map.buff_size = &buff_size;
+
+	status = efi_get_memory_map(&map);
+	if (status != EFI_SUCCESS)
+		return false;
+
+	for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
+		efi_memory_desc_t *md = (void *)memory_map + map_offset;
+		u64 end = md->phys_addr + md->num_pages * EFI_PAGE_SIZE;
+
+		/*
+		 * Find the region that covers base, and return whether
+		 * it covers base+size bytes.
+		 */
+		if (base >= md->phys_addr && base < end) {
+			ret = (base + size) <= end;
+			break;
+		}
+	}
+
+	efi_bs_call(free_pool, memory_map);
+
+	return ret;
 }
 
 efi_status_t handle_kernel_image(unsigned long *image_addr,
@@ -56,6 +89,16 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
 	unsigned long kernel_size, kernel_memsize = 0;
 	u32 phys_seed = 0;
 
+	/*
+	 * Although relocatable kernels can fix up the misalignment with
+	 * respect to MIN_KIMG_ALIGN, the resulting virtual text addresses are
+	 * subtly out of sync with those recorded in the vmlinux when kaslr is
+	 * disabled but the image required relocation anyway. Therefore retain
+	 * 2M alignment if KASLR was explicitly disabled, even if it was not
+	 * going to be activated to begin with.
	 */
+	u64 min_kimg_align = efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN;
+
 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
 		if (!efi_nokaslr) {
 			status = efi_get_random_bytes(sizeof(phys_seed),
@@ -76,6 +119,10 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
 	if (image->image_base != _text)
 		efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
 
+	if (!IS_ALIGNED((u64)_text, EFI_KIMG_ALIGN))
+		efi_err("FIRMWARE BUG: kernel image not aligned on %ldk boundary\n",
+			EFI_KIMG_ALIGN >> 10);
+
 	kernel_size = _edata - _text;
 	kernel_memsize = kernel_size + (_end - _edata);
 	*reserve_size = kernel_memsize;
@@ -85,14 +132,18 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
 		 * If KASLR is enabled, and we have some randomness available,
 		 * locate the kernel at a randomized offset in physical memory.
 		 */
-		status = efi_random_alloc(*reserve_size, min_kimg_align(),
+		status = efi_random_alloc(*reserve_size, min_kimg_align,
 					  reserve_addr, phys_seed);
+		if (status != EFI_SUCCESS)
+			efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
 	} else {
 		status = EFI_OUT_OF_RESOURCES;
 	}
 
 	if (status != EFI_SUCCESS) {
-		if (IS_ALIGNED((u64)_text, min_kimg_align())) {
+		if (!check_image_region((u64)_text, kernel_memsize)) {
+			efi_err("FIRMWARE BUG: Image BSS overlaps adjacent EFI memory region\n");
+		} else if (IS_ALIGNED((u64)_text, min_kimg_align)) {
 			/*
 			 * Just execute from wherever we were loaded by the
 			 * UEFI PE/COFF loader if the alignment is suitable.
@@ -103,7 +154,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
 	}
 
 	status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
-					    ULONG_MAX, min_kimg_align());
+					    ULONG_MAX, min_kimg_align);
 
 	if (status != EFI_SUCCESS) {
 		efi_err("Failed to relocate kernel\n");
@@ -30,6 +30,8 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
 
 	region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1,
 			 (u64)ULONG_MAX);
+	if (region_end < size)
+		return 0;
 
 	first_slot = round_up(md->phys_addr, align);
 	last_slot = round_down(region_end - size + 1, align);
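
The new check_image_region() in the EFI-stub hunk walks the firmware memory map and accepts the image only if its first and last byte fall inside one descriptor. A small self-contained sketch of that containment test (plain C, with a hypothetical mem_desc array standing in for the EFI boot services):

	#include <stdbool.h>
	#include <stdint.h>

	struct mem_desc {
		uint64_t phys_addr;
		uint64_t num_pages;	/* 4 KiB pages, like EFI_PAGE_SIZE */
	};

	/* Return whether [base, base + size) lies within a single descriptor. */
	static bool region_covered(const struct mem_desc *map, int n,
				   uint64_t base, uint64_t size)
	{
		int i;

		for (i = 0; i < n; i++) {
			uint64_t end = map[i].phys_addr + map[i].num_pages * 4096;

			/* Find the entry covering base, then check base + size. */
			if (base >= map[i].phys_addr && base < end)
				return (base + size) <= end;
		}
		return false;
	}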
@@ -468,6 +468,46 @@ bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *ade
 	return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
 }
 
+/*
+ * Helper function to query RAS EEPROM address
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Return true if vbios supports ras rom address reporting
+ */
+bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_address)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	int index;
+	u16 data_offset, size;
+	union firmware_info *firmware_info;
+	u8 frev, crev;
+
+	if (i2c_address == NULL)
+		return false;
+
+	*i2c_address = 0;
+
+	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+			firmwareinfo);
+
+	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
+				index, &size, &frev, &crev, &data_offset)) {
+		/* support firmware_info 3.4 + */
+		if ((frev == 3 && crev >=4) || (frev > 3)) {
+			firmware_info = (union firmware_info *)
+				(mode_info->atom_context->bios + data_offset);
+			*i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
+		}
+	}
+
+	if (*i2c_address != 0)
+		return true;
+
+	return false;
+}
+
+
 union smu_info {
 	struct atom_smu_info_v3_1 v31;
 };
@@ -36,6 +36,7 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
 bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
 bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev);
+bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_address);
 bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev);
 bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev);
@@ -299,6 +299,9 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
 				  ip->major, ip->minor,
 				  ip->revision);
 
+			if (le16_to_cpu(ip->hw_id) == VCN_HWID)
+				adev->vcn.num_vcn_inst++;
+
 			for (k = 0; k < num_base_address; k++) {
 				/*
 				 * convert the endianness of base addresses in place,
@@ -385,7 +388,7 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
 {
 	struct binary_header *bhdr;
 	struct harvest_table *harvest_info;
-	int i;
+	int i, vcn_harvest_count = 0;
 
 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
 	harvest_info = (struct harvest_table *)(adev->mman.discovery_bin +
@@ -397,8 +400,7 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
 
 		switch (le32_to_cpu(harvest_info->list[i].hw_id)) {
 		case VCN_HWID:
-			adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
-			adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+			vcn_harvest_count++;
 			break;
 		case DMU_HWID:
 			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
@@ -407,6 +409,10 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
 			break;
 		}
 	}
+	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
+		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
+		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+	}
 }
 
 int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
@@ -1571,6 +1571,8 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
 		pci_ignore_hotplug(pdev);
 		pci_set_power_state(pdev, PCI_D3cold);
 		drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+	} else if (amdgpu_device_supports_boco(drm_dev)) {
+		/* nothing to do */
 	} else if (amdgpu_device_supports_baco(drm_dev)) {
 		amdgpu_device_baco_enter(drm_dev);
 	}
@@ -26,6 +26,7 @@
 #include "amdgpu_ras.h"
 #include <linux/bits.h>
 #include "atom.h"
+#include "amdgpu_atomfirmware.h"
 
 #define EEPROM_I2C_TARGET_ADDR_VEGA20		0xA0
 #define EEPROM_I2C_TARGET_ADDR_ARCTURUS		0xA8
@@ -96,6 +97,9 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
 	if (!i2c_addr)
 		return false;
 
+	if (amdgpu_atomfirmware_ras_rom_addr(adev, (uint8_t*)i2c_addr))
+		return true;
+
 	switch (adev->asic_type) {
 	case CHIP_VEGA20:
 		*i2c_addr = EEPROM_I2C_TARGET_ADDR_VEGA20;
@@ -9605,7 +9605,12 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
 		} else if (amdgpu_freesync_vid_mode && aconnector &&
 			   is_freesync_video_mode(&new_crtc_state->mode,
 						  aconnector)) {
-			set_freesync_fixed_config(dm_new_crtc_state);
+			struct drm_display_mode *high_mode;
+
+			high_mode = get_highest_refresh_rate_mode(aconnector, false);
+			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
+				set_freesync_fixed_config(dm_new_crtc_state);
+			}
 		}
 
 		ret = dm_atomic_get_state(state, &dm_state);
@@ -1788,7 +1788,6 @@ static bool dcn30_split_stream_for_mpc_or_odm(
 	}
 	pri_pipe->next_odm_pipe = sec_pipe;
 	sec_pipe->prev_odm_pipe = pri_pipe;
-	ASSERT(sec_pipe->top_pipe == NULL);
 
 	if (!sec_pipe->top_pipe)
 		sec_pipe->stream_res.opp = pool->opps[pipe_idx];
@@ -590,7 +590,7 @@ struct atom_firmware_info_v3_4 {
   uint8_t  board_i2c_feature_id;            // enum of atom_board_i2c_feature_id_def
   uint8_t  board_i2c_feature_gpio_id;       // i2c id find in gpio_lut data table gpio_id
   uint8_t  board_i2c_feature_slave_addr;
-  uint8_t  reserved3;
+  uint8_t  ras_rom_i2c_slave_addr;
   uint16_t bootup_mvddq_mv;
   uint16_t bootup_mvpp_mv;
   uint32_t zfbstartaddrin16mb;
@@ -111,7 +111,9 @@ typedef struct {
   uint32_t InWhisperMode        : 1;
   uint32_t spare0               : 1;
   uint32_t ZstateStatus         : 4;
-  uint32_t spare1               :12;
+  uint32_t spare1               : 4;
+  uint32_t DstateFun            : 4;
+  uint32_t DstateDev            : 4;
   // MP1_EXT_SCRATCH2
   uint32_t P2JobHandler         :24;
   uint32_t RsmuPmiP2FinishedCnt : 8;
@@ -353,8 +353,7 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
 	struct amdgpu_device *adev = smu->adev;
 	uint32_t val;
 
-	if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO ||
-	    powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_MACO) {
+	if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO) {
 		val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
 		smu_baco->platform_support =
 			(val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
@@ -256,7 +256,7 @@ static int vangogh_tables_init(struct smu_context *smu)
 	return 0;
 
 err3_out:
-	kfree(smu_table->clocks_table);
+	kfree(smu_table->watermarks_table);
 err2_out:
 	kfree(smu_table->gpu_metrics_table);
 err1_out:
@@ -5746,16 +5746,18 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
 
 	switch (crtc_state->pipe_bpp) {
 	case 18:
-		val |= PIPEMISC_DITHER_6_BPC;
+		val |= PIPEMISC_6_BPC;
 		break;
 	case 24:
-		val |= PIPEMISC_DITHER_8_BPC;
+		val |= PIPEMISC_8_BPC;
 		break;
 	case 30:
-		val |= PIPEMISC_DITHER_10_BPC;
+		val |= PIPEMISC_10_BPC;
 		break;
 	case 36:
-		val |= PIPEMISC_DITHER_12_BPC;
+		/* Port output 12BPC defined for ADLP+ */
+		if (DISPLAY_VER(dev_priv) > 12)
+			val |= PIPEMISC_12_BPC_ADLP;
 		break;
 	default:
 		MISSING_CASE(crtc_state->pipe_bpp);
@@ -5808,15 +5810,27 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
 
 	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
 
-	switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
-	case PIPEMISC_DITHER_6_BPC:
+	switch (tmp & PIPEMISC_BPC_MASK) {
+	case PIPEMISC_6_BPC:
 		return 18;
-	case PIPEMISC_DITHER_8_BPC:
+	case PIPEMISC_8_BPC:
 		return 24;
-	case PIPEMISC_DITHER_10_BPC:
+	case PIPEMISC_10_BPC:
 		return 30;
-	case PIPEMISC_DITHER_12_BPC:
-		return 36;
+	/*
+	 * PORT OUTPUT 12 BPC defined for ADLP+.
+	 *
+	 * TODO:
+	 * For previous platforms with DSI interface, bits 5:7
+	 * are used for storing pipe_bpp irrespective of dithering.
+	 * Since the value of 12 BPC is not defined for these bits
+	 * on older platforms, need to find a workaround for 12 BPC
+	 * MIPI DSI HW readout.
+	 */
+	case PIPEMISC_12_BPC_ADLP:
+		if (DISPLAY_VER(dev_priv) > 12)
+			return 36;
+		fallthrough;
 	default:
 		MISSING_CASE(tmp);
 		return 0;
@@ -3149,6 +3149,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
 	MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
 	MMIO_D(_MMIO(0xb110), D_BDW);
+	MMIO_D(GEN9_SCRATCH_LNCF1, D_BDW_PLUS);
 
 	MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0,
 		D_BDW_PLUS, NULL, force_nonpriv_write);
@@ -105,6 +105,8 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
 	{RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
 	{RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
 	{RCS0, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
+	{RCS0, GEN9_SCRATCH1, 0, false}, /* 0xb11c */
+	{RCS0, GEN9_SCRATCH_LNCF1, 0, false}, /* 0xb008 */
 	{RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
 	{RCS0, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
 	{RCS0, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
@@ -727,9 +727,18 @@ static void err_print_gt(struct drm_i915_error_state_buf *m,
 	if (GRAPHICS_VER(m->i915) >= 12) {
 		int i;
 
-		for (i = 0; i < GEN12_SFC_DONE_MAX; i++)
+		for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
+			/*
+			 * SFC_DONE resides in the VD forcewake domain, so it
+			 * only exists if the corresponding VCS engine is
+			 * present.
+			 */
+			if (!HAS_ENGINE(gt->_gt, _VCS(i * 2)))
+				continue;
+
 			err_printf(m, "  SFC_DONE[%d]: 0x%08x\n", i,
 				   gt->sfc_done[i]);
+		}
 
 		err_printf(m, "  GAM_DONE: 0x%08x\n", gt->gam_done);
 	}
@@ -1581,6 +1590,14 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
 
 	if (GRAPHICS_VER(i915) >= 12) {
 		for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
+			/*
+			 * SFC_DONE resides in the VD forcewake domain, so it
+			 * only exists if the corresponding VCS engine is
+			 * present.
+			 */
+			if (!HAS_ENGINE(gt->_gt, _VCS(i * 2)))
+				continue;
+
 			gt->sfc_done[i] =
 				intel_uncore_read(uncore, GEN12_SFC_DONE(i));
 		}
@@ -6163,11 +6163,17 @@ enum {
 #define   PIPEMISC_HDR_MODE_PRECISION	(1 << 23) /* icl+ */
 #define   PIPEMISC_OUTPUT_COLORSPACE_YUV	(1 << 11)
 #define   PIPEMISC_PIXEL_ROUNDING_TRUNC	REG_BIT(8) /* tgl+ */
-#define   PIPEMISC_DITHER_BPC_MASK	(7 << 5)
-#define   PIPEMISC_DITHER_8_BPC	(0 << 5)
-#define   PIPEMISC_DITHER_10_BPC	(1 << 5)
-#define   PIPEMISC_DITHER_6_BPC	(2 << 5)
-#define   PIPEMISC_DITHER_12_BPC	(3 << 5)
+/*
+ * For Display < 13, Bits 5-7 of PIPE MISC represent DITHER BPC with
+ * valid values of: 6, 8, 10 BPC.
+ * ADLP+, the bits 5-7 represent PORT OUTPUT BPC with valid values of:
+ * 6, 8, 10, 12 BPC.
 */
+#define   PIPEMISC_BPC_MASK		(7 << 5)
+#define   PIPEMISC_8_BPC		(0 << 5)
+#define   PIPEMISC_10_BPC		(1 << 5)
+#define   PIPEMISC_6_BPC		(2 << 5)
+#define   PIPEMISC_12_BPC_ADLP		(4 << 5) /* adlp+ */
 #define   PIPEMISC_DITHER_ENABLE	(1 << 4)
 #define   PIPEMISC_DITHER_TYPE_MASK	(3 << 2)
 #define   PIPEMISC_DITHER_TYPE_SP	(0 << 2)
@@ -605,11 +605,15 @@ static int mtk_dpi_bridge_atomic_check(struct drm_bridge *bridge,
 					struct drm_crtc_state *crtc_state,
 					struct drm_connector_state *conn_state)
 {
-	struct mtk_dpi *dpi = bridge->driver_private;
+	struct mtk_dpi *dpi = bridge_to_dpi(bridge);
 	unsigned int out_bus_format;
 
 	out_bus_format = bridge_state->output_bus_cfg.format;
 
+	if (out_bus_format == MEDIA_BUS_FMT_FIXED)
+		if (dpi->conf->num_output_fmts)
+			out_bus_format = dpi->conf->output_fmts[0];
+
 	dev_dbg(dpi->dev, "input format 0x%04x, output format 0x%04x\n",
 		bridge_state->input_bus_cfg.format,
 		bridge_state->output_bus_cfg.format);
@@ -532,13 +532,10 @@ void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
 			       struct drm_atomic_state *state)
 {
 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
-	const struct drm_plane_helper_funcs *plane_helper_funcs =
-			plane->helper_private;
 
 	if (!mtk_crtc->enabled)
 		return;
 
-	plane_helper_funcs->atomic_update(plane, state);
 	mtk_drm_crtc_update_config(mtk_crtc, false);
 }
 
@@ -110,6 +110,35 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
 						   true, true);
 }
 
+static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
+				       struct mtk_plane_state *mtk_plane_state)
+{
+	struct drm_framebuffer *fb = new_state->fb;
+	struct drm_gem_object *gem;
+	struct mtk_drm_gem_obj *mtk_gem;
+	unsigned int pitch, format;
+	dma_addr_t addr;
+
+	gem = fb->obj[0];
+	mtk_gem = to_mtk_gem_obj(gem);
+	addr = mtk_gem->dma_addr;
+	pitch = fb->pitches[0];
+	format = fb->format->format;
+
+	addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
+	addr += (new_state->src.y1 >> 16) * pitch;
+
+	mtk_plane_state->pending.enable = true;
+	mtk_plane_state->pending.pitch = pitch;
+	mtk_plane_state->pending.format = format;
+	mtk_plane_state->pending.addr = addr;
+	mtk_plane_state->pending.x = new_state->dst.x1;
+	mtk_plane_state->pending.y = new_state->dst.y1;
+	mtk_plane_state->pending.width = drm_rect_width(&new_state->dst);
+	mtk_plane_state->pending.height = drm_rect_height(&new_state->dst);
+	mtk_plane_state->pending.rotation = new_state->rotation;
+}
+
 static void mtk_plane_atomic_async_update(struct drm_plane *plane,
 					  struct drm_atomic_state *state)
 {
@@ -126,8 +155,10 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane,
 	plane->state->src_h = new_state->src_h;
 	plane->state->src_w = new_state->src_w;
 	swap(plane->state->fb, new_state->fb);
-	new_plane_state->pending.async_dirty = true;
 
+	mtk_plane_update_new_state(new_state, new_plane_state);
+	wmb(); /* Make sure the above parameters are set before update */
+	new_plane_state->pending.async_dirty = true;
 	mtk_drm_crtc_async_update(new_state->crtc, plane, state);
 }
 
@@ -189,14 +220,8 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
 	struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state);
-	struct drm_crtc *crtc = new_state->crtc;
-	struct drm_framebuffer *fb = new_state->fb;
-	struct drm_gem_object *gem;
-	struct mtk_drm_gem_obj *mtk_gem;
-	unsigned int pitch, format;
-	dma_addr_t addr;
 
-	if (!crtc || WARN_ON(!fb))
+	if (!new_state->crtc || WARN_ON(!new_state->fb))
 		return;
 
 	if (!new_state->visible) {
@@ -204,24 +229,7 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
 		return;
 	}
 
-	gem = fb->obj[0];
-	mtk_gem = to_mtk_gem_obj(gem);
-	addr = mtk_gem->dma_addr;
-	pitch = fb->pitches[0];
-	format = fb->format->format;
-
-	addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
-	addr += (new_state->src.y1 >> 16) * pitch;
-
-	mtk_plane_state->pending.enable = true;
-	mtk_plane_state->pending.pitch = pitch;
-	mtk_plane_state->pending.format = format;
-	mtk_plane_state->pending.addr = addr;
-	mtk_plane_state->pending.x = new_state->dst.x1;
-	mtk_plane_state->pending.y = new_state->dst.y1;
-	mtk_plane_state->pending.width = drm_rect_width(&new_state->dst);
-	mtk_plane_state->pending.height = drm_rect_height(&new_state->dst);
-	mtk_plane_state->pending.rotation = new_state->rotation;
+	mtk_plane_update_new_state(new_state, mtk_plane_state);
 	wmb(); /* Make sure the above parameters are set before update */
 	mtk_plane_state->pending.dirty = true;
 }
@@ -634,6 +634,11 @@
 #define VPP_WRAP_OSD3_MATRIX_PRE_OFFSET2 0x3dbc
 #define VPP_WRAP_OSD3_MATRIX_EN_CTRL 0x3dbd
 
+/* osd1 HDR */
+#define OSD1_HDR2_CTRL 0x38a0
+#define OSD1_HDR2_CTRL_VDIN0_HDR2_TOP_EN BIT(13)
+#define OSD1_HDR2_CTRL_REG_ONLY_MAT BIT(16)
+
 /* osd2 scaler */
 #define OSD2_VSC_PHASE_STEP 0x3d00
 #define OSD2_VSC_INI_PHASE 0x3d01
@@ -425,9 +425,14 @@ void meson_viu_init(struct meson_drm *priv)
 	if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
 	    meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
 		meson_viu_load_matrix(priv);
-	else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+	else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
 		meson_viu_set_g12a_osd1_matrix(priv, RGB709_to_YUV709l_coeff,
 					       true);
+		/* fix green/pink color distortion from vendor u-boot */
+		writel_bits_relaxed(OSD1_HDR2_CTRL_REG_ONLY_MAT |
+				OSD1_HDR2_CTRL_VDIN0_HDR2_TOP_EN, 0,
+				priv->io_base + _REG(OSD1_HDR2_CTRL));
+	}
 
 	/* Initialize OSD1 fifo control register */
 	reg = VIU_OSD_DDR_PRIORITY_URGENT |
|
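
Note: on G12A the vendor u-boot can leave the OSD1 HDR2 block enabled, which the hunk's own comment describes as green/pink colour distortion; init now clears the two enable bits. writel_bits_relaxed() is a masked read-modify-write on an MMIO register; a minimal user-space model of that primitive (function name is mine, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    /* Only bits selected by 'mask' change; val = 0 clears them, as the
     * hunk above does for the two OSD1_HDR2_CTRL enable bits. */
    static uint32_t rmw_bits(uint32_t reg, uint32_t mask, uint32_t val)
    {
            return (reg & ~mask) | (val & mask);
    }

    int main(void)
    {
            uint32_t reg = (1u << 13) | (1u << 16) | 0x5;

            printf("0x%x\n", rmw_bits(reg, (1u << 13) | (1u << 16), 0)); /* 0x5 */
            return 0;
    }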
@@ -141,7 +141,7 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
 	if (count > 8192)
 		count = 8192;
 
-	tmp = kmalloc(count, GFP_KERNEL);
+	tmp = kzalloc(count, GFP_KERNEL);
 	if (tmp == NULL)
 		return -ENOMEM;
 
@@ -150,7 +150,8 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
 
 	ret = i2c_master_recv(client, tmp, count);
 	if (ret >= 0)
-		ret = copy_to_user(buf, tmp, count) ? -EFAULT : ret;
+		if (copy_to_user(buf, tmp, ret))
+			ret = -EFAULT;
 	kfree(tmp);
 	return ret;
 }
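
Note: two hardening changes in one function: the bounce buffer is now zeroed at allocation (kzalloc), and only the `ret` bytes actually returned by i2c_master_recv() are copied out. The old code copied the full `count` bytes, so a short read could hand stale kernel heap contents to user space.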
@@ -231,6 +231,7 @@ config DMARD10
 
 config FXLS8962AF
 	tristate
+	depends on I2C || !I2C # cannot be built-in for modular I2C
 
 config FXLS8962AF_I2C
 	tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer I2C Driver"
@@ -247,6 +248,7 @@ config FXLS8962AF_I2C
 config FXLS8962AF_SPI
 	tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer SPI Driver"
 	depends on SPI
+	depends on I2C || !I2C
 	select FXLS8962AF
 	select REGMAP_SPI
 	help
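
Note: `depends on I2C || !I2C` reads like a tautology, but it is the standard Kconfig idiom for tying one symbol's tristate to another's: when I2C=m the expression forces this symbol to be at most =m too, so code that references the modular I2C core can never be built into the kernel image; when I2C is y or n there is no restriction.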
@@ -637,7 +637,7 @@ static int fxls8962af_i2c_raw_read_errata3(struct fxls8962af_data *data,
 			return ret;
 	}
 
-	return ret;
+	return 0;
 }
 
 static int fxls8962af_fifo_transfer(struct fxls8962af_data *data,
@@ -664,8 +664,8 @@ static int palmas_adc_wakeup_configure(struct palmas_gpadc *adc)
 
 	adc_period = adc->auto_conversion_period;
 	for (i = 0; i < 16; ++i) {
-		if (((1000 * (1 << i)) / 32) < adc_period)
-			continue;
+		if (((1000 * (1 << i)) / 32) >= adc_period)
+			break;
 	}
 	if (i > 0)
 		i--;
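
Note: the old loop body was `continue`-only, so it never exited early; `i` always reached 16 and, after the decrement, the largest period was always chosen. The fix stops at the first conversion period at least as long as the requested one, then steps back one entry. A standalone model of the corrected search (plain C, values illustrative):

    #include <stdio.h>

    /* Table entry i corresponds to a period of (1000 << i) / 32; pick the
     * largest entry that is still shorter than the requested period. */
    static int pick_period_index(int adc_period)
    {
            int i;

            for (i = 0; i < 16; ++i) {
                    if (((1000 * (1 << i)) / 32) >= adc_period)
                            break;
            }
            if (i > 0)
                    i--;
            return i;
    }

    int main(void)
    {
            printf("index for 700: %d\n", pick_period_index(700)); /* prints 4 */
            return 0;
    }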
@@ -568,7 +568,6 @@ static int ti_ads7950_probe(struct spi_device *spi)
 	st->ring_xfer.tx_buf = &st->tx_buf[0];
 	st->ring_xfer.rx_buf = &st->rx_buf[0];
 	/* len will be set later */
-	st->ring_xfer.cs_change = true;
 
 	spi_message_add_tail(&st->ring_xfer, &st->ring_msg);
 
@@ -25,6 +25,8 @@
 #include <linux/iio/trigger_consumer.h>
 #include <linux/iio/triggered_buffer.h>
 
+#include <linux/time.h>
+
 #define HDC100X_REG_TEMP	0x00
 #define HDC100X_REG_HUMIDITY	0x01
 
@@ -166,7 +168,7 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
 				   struct iio_chan_spec const *chan)
 {
 	struct i2c_client *client = data->client;
-	int delay = data->adc_int_us[chan->address];
+	int delay = data->adc_int_us[chan->address] + 1*USEC_PER_MSEC;
 	int ret;
 	__be16 val;
 
@@ -316,7 +318,7 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p)
 	struct iio_dev *indio_dev = pf->indio_dev;
 	struct hdc100x_data *data = iio_priv(indio_dev);
 	struct i2c_client *client = data->client;
-	int delay = data->adc_int_us[0] + data->adc_int_us[1];
+	int delay = data->adc_int_us[0] + data->adc_int_us[1] + 2*USEC_PER_MSEC;
 	int ret;
 
 	/* dual read starts at temp register */
@@ -411,12 +411,11 @@ int __adis_initial_startup(struct adis *adis)
 	int ret;
 
 	/* check if the device has rst pin low */
-	gpio = devm_gpiod_get_optional(&adis->spi->dev, "reset", GPIOD_ASIS);
+	gpio = devm_gpiod_get_optional(&adis->spi->dev, "reset", GPIOD_OUT_HIGH);
 	if (IS_ERR(gpio))
 		return PTR_ERR(gpio);
 
 	if (gpio) {
-		gpiod_set_value_cansleep(gpio, 1);
 		msleep(10);
 		/* bring device out of reset */
 		gpiod_set_value_cansleep(gpio, 0);
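
Note: GPIOD_ASIS hands back the reset line without configuring its direction, so the later gpiod_set_value_cansleep(gpio, 1) was not guaranteed to actually drive the pin. GPIOD_OUT_HIGH requests the line as an output already asserted (reset held), which also makes the explicit set-to-1 redundant.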
@@ -945,7 +945,6 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	u32 *cqb = NULL;
 	void *cqc;
 	int cqe_size;
-	unsigned int irqn;
 	int eqn;
 	int err;
 
@@ -984,7 +983,7 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
 	}
 
-	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
+	err = mlx5_vector2eqn(dev->mdev, vector, &eqn);
 	if (err)
 		goto err_cqb;
 
@@ -1007,7 +1006,6 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	goto err_cqb;
 
 	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
-	cq->mcq.irqn = irqn;
 	if (udata)
 		cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
 	else
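
Note: mlx5_vector2eqn() no longer reports the IRQ number, so the CQ creation path above stops caching mcq.irqn, and the DEVX EQN query handler below is updated to the same three-argument signature.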
@@ -975,7 +975,6 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
 	struct mlx5_ib_dev *dev;
 	int user_vector;
 	int dev_eqn;
-	unsigned int irqn;
 	int err;
 
 	if (uverbs_copy_from(&user_vector, attrs,
@@ -987,7 +986,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
 		return PTR_ERR(c);
 	dev = to_mdev(c->ibucontext.device);
 
-	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
+	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn);
 	if (err < 0)
 		return err;
 
@@ -71,12 +71,18 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 		family = AF_INET6;
 
 	if (bareudp->ethertype == htons(ETH_P_IP)) {
-		struct iphdr *iphdr;
+		__u8 ipversion;
 
-		iphdr = (struct iphdr *)(skb->data + BAREUDP_BASE_HLEN);
-		if (iphdr->version == 4) {
-			proto = bareudp->ethertype;
-		} else if (bareudp->multi_proto_mode && (iphdr->version == 6)) {
+		if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
+				  sizeof(ipversion))) {
+			bareudp->dev->stats.rx_dropped++;
+			goto drop;
+		}
+		ipversion >>= 4;
+
+		if (ipversion == 4) {
+			proto = htons(ETH_P_IP);
+		} else if (ipversion == 6 && bareudp->multi_proto_mode) {
 			proto = htons(ETH_P_IPV6);
 		} else {
 			bareudp->dev->stats.rx_dropped++;
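
Note: skb->data only covers the linear part of the buffer, so casting a fixed offset to an iphdr can read past it on fragmented packets; skb_copy_bits() handles non-linear data and fails cleanly when the packet is too short, after which the version is taken from the high nibble of the first header byte. A self-contained model of that nibble extraction (plain C, names are mine):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Returns the IP version nibble, or -1 when the "packet" is too short,
     * mirroring the skb_copy_bits() failure branch above. */
    static int ip_version(const uint8_t *pkt, size_t len, size_t off)
    {
            uint8_t b;

            if (off + sizeof(b) > len)
                    return -1;
            memcpy(&b, pkt + off, sizeof(b));
            return b >> 4;
    }

    int main(void)
    {
            uint8_t v4_hdr[] = { 0x45, 0x00 }; /* version 4, IHL 5 */

            printf("version = %d\n", ip_version(v4_hdr, sizeof(v4_hdr), 0));
            return 0;
    }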
@@ -1164,10 +1164,10 @@ static int m_can_set_bittiming(struct net_device *dev)
 			      FIELD_PREP(TDCR_TDCO_MASK, tdco));
 		}
 
-		reg_btp = FIELD_PREP(NBTP_NBRP_MASK, brp) |
-			  FIELD_PREP(NBTP_NSJW_MASK, sjw) |
-			  FIELD_PREP(NBTP_NTSEG1_MASK, tseg1) |
-			  FIELD_PREP(NBTP_NTSEG2_MASK, tseg2);
+		reg_btp |= FIELD_PREP(DBTP_DBRP_MASK, brp) |
+			   FIELD_PREP(DBTP_DSJW_MASK, sjw) |
+			   FIELD_PREP(DBTP_DTSEG1_MASK, tseg1) |
+			   FIELD_PREP(DBTP_DTSEG2_MASK, tseg2);
 
 		m_can_write(cdev, M_CAN_DBTP, reg_btp);
 	}
@@ -912,6 +912,7 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
 {
 	struct hellcreek *hellcreek = ds->priv;
 	u16 entries;
+	int ret = 0;
 	size_t i;
 
 	mutex_lock(&hellcreek->reg_lock);
@@ -943,12 +944,14 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
 		if (!(entry.portmask & BIT(port)))
 			continue;
 
-		cb(entry.mac, 0, entry.is_static, data);
+		ret = cb(entry.mac, 0, entry.is_static, data);
+		if (ret)
+			break;
 	}
 
 	mutex_unlock(&hellcreek->reg_lock);
 
-	return 0;
+	return ret;
 }
 
 static int hellcreek_vlan_filtering(struct dsa_switch *ds, int port,
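
Note: this is one instance of a recurring "broken backpressure" fix. The dsa_fdb_dump_cb_t callback returns an int precisely so the dump can be aborted (for example when the netlink buffer fills up); drivers that ignored the return value kept iterating and silently dropped the error. The same pattern is applied to lan9303, gswip and sja1105 in the hunks below.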
@@ -557,12 +557,12 @@ static int lan9303_alr_make_entry_raw(struct lan9303 *chip, u32 dat0, u32 dat1)
 	return 0;
 }
 
-typedef void alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1,
-			   int portmap, void *ctx);
+typedef int alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1,
+			  int portmap, void *ctx);
 
-static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
+static int lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
 {
-	int i;
+	int ret = 0, i;
 
 	mutex_lock(&chip->alr_mutex);
 	lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
@@ -582,13 +582,17 @@ static int lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
 			LAN9303_ALR_DAT1_PORT_BITOFFS;
 		portmap = alrport_2_portmap[alrport];
 
-		cb(chip, dat0, dat1, portmap, ctx);
+		ret = cb(chip, dat0, dat1, portmap, ctx);
+		if (ret)
+			break;
 
 		lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
 					 LAN9303_ALR_CMD_GET_NEXT);
 		lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
 	}
 	mutex_unlock(&chip->alr_mutex);
 
+	return ret;
 }
 
 static void alr_reg_to_mac(u32 dat0, u32 dat1, u8 mac[6])
@@ -606,18 +610,20 @@ struct del_port_learned_ctx {
 };
 
 /* Clear learned (non-static) entry on given port */
-static void alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0,
-					 u32 dat1, int portmap, void *ctx)
+static int alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0,
+					u32 dat1, int portmap, void *ctx)
 {
 	struct del_port_learned_ctx *del_ctx = ctx;
 	int port = del_ctx->port;
 
 	if (((BIT(port) & portmap) == 0) || (dat1 & LAN9303_ALR_DAT1_STATIC))
-		return;
+		return 0;
 
 	/* learned entries has only one port, we can just delete */
 	dat1 &= ~LAN9303_ALR_DAT1_VALID; /* delete entry */
 	lan9303_alr_make_entry_raw(chip, dat0, dat1);
+
+	return 0;
 }
 
 struct port_fdb_dump_ctx {
@@ -626,19 +632,19 @@ struct port_fdb_dump_ctx {
 	dsa_fdb_dump_cb_t *cb;
 };
 
-static void alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0,
-				      u32 dat1, int portmap, void *ctx)
+static int alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0,
+				     u32 dat1, int portmap, void *ctx)
 {
 	struct port_fdb_dump_ctx *dump_ctx = ctx;
 	u8 mac[ETH_ALEN];
 	bool is_static;
 
 	if ((BIT(dump_ctx->port) & portmap) == 0)
-		return;
+		return 0;
 
 	alr_reg_to_mac(dat0, dat1, mac);
 	is_static = !!(dat1 & LAN9303_ALR_DAT1_STATIC);
-	dump_ctx->cb(mac, 0, is_static, dump_ctx->data);
+	return dump_ctx->cb(mac, 0, is_static, dump_ctx->data);
 }
 
 /* Set a static ALR entry. Delete entry if port_map is zero */
@@ -1210,9 +1216,7 @@ static int lan9303_port_fdb_dump(struct dsa_switch *ds, int port,
 	};
 
 	dev_dbg(chip->dev, "%s(%d)\n", __func__, port);
-	lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx);
-
-	return 0;
+	return lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx);
 }
 
 static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
@@ -1404,11 +1404,17 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
 		addr[1] = mac_bridge.key[2] & 0xff;
 		addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
 		if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
-			if (mac_bridge.val[0] & BIT(port))
-				cb(addr, 0, true, data);
+			if (mac_bridge.val[0] & BIT(port)) {
+				err = cb(addr, 0, true, data);
+				if (err)
+					return err;
+			}
 		} else {
-			if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port)
-				cb(addr, 0, false, data);
+			if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) {
+				err = cb(addr, 0, false, data);
+				if (err)
+					return err;
+			}
 		}
 	}
 	return 0;
@@ -687,8 +687,8 @@ static void ksz8_r_vlan_entries(struct ksz_device *dev, u16 addr)
 	shifts = ksz8->shifts;
 
 	ksz8_r_table(dev, TABLE_VLAN, addr, &data);
-	addr *= dev->phy_port_cnt;
-	for (i = 0; i < dev->phy_port_cnt; i++) {
+	addr *= 4;
+	for (i = 0; i < 4; i++) {
 		dev->vlan_cache[addr + i].table[0] = (u16)data;
 		data >>= shifts[VLAN_TABLE];
 	}
@@ -702,7 +702,7 @@ static void ksz8_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan)
 	u64 buf;
 
 	data = (u16 *)&buf;
-	addr = vid / dev->phy_port_cnt;
+	addr = vid / 4;
 	index = vid & 3;
 	ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
 	*vlan = data[index];
@@ -716,7 +716,7 @@ static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
 	u64 buf;
 
 	data = (u16 *)&buf;
-	addr = vid / dev->phy_port_cnt;
+	addr = vid / 4;
 	index = vid & 3;
 	ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
 	data[index] = vlan;
@@ -1119,24 +1119,67 @@ static int ksz8_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag,
 	if (ksz_is_ksz88x3(dev))
 		return -ENOTSUPP;
 
+	/* Discard packets with VID not enabled on the switch */
 	ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag);
 
+	/* Discard packets with VID not enabled on the ingress port */
+	for (port = 0; port < dev->phy_port_cnt; ++port)
+		ksz_port_cfg(dev, port, REG_PORT_CTRL_2, PORT_INGRESS_FILTER,
+			     flag);
+
 	return 0;
 }
 
+static void ksz8_port_enable_pvid(struct ksz_device *dev, int port, bool state)
+{
+	if (ksz_is_ksz88x3(dev)) {
+		ksz_cfg(dev, REG_SW_INSERT_SRC_PVID,
+			0x03 << (4 - 2 * port), state);
+	} else {
+		ksz_pwrite8(dev, port, REG_PORT_CTRL_12, state ? 0x0f : 0x00);
+	}
+}
+
 static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
 			      const struct switchdev_obj_port_vlan *vlan,
 			      struct netlink_ext_ack *extack)
 {
 	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
 	struct ksz_device *dev = ds->priv;
+	struct ksz_port *p = &dev->ports[port];
 	u16 data, new_pvid = 0;
 	u8 fid, member, valid;
 
 	if (ksz_is_ksz88x3(dev))
 		return -ENOTSUPP;
 
-	ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
+	/* If a VLAN is added with untagged flag different from the
+	 * port's Remove Tag flag, we need to change the latter.
+	 * Ignore VID 0, which is always untagged.
+	 * Ignore CPU port, which will always be tagged.
+	 */
+	if (untagged != p->remove_tag && vlan->vid != 0 &&
+	    port != dev->cpu_port) {
+		unsigned int vid;
+
+		/* Reject attempts to add a VLAN that requires the
+		 * Remove Tag flag to be changed, unless there are no
+		 * other VLANs currently configured.
+		 */
+		for (vid = 1; vid < dev->num_vlans; ++vid) {
+			/* Skip the VID we are going to add or reconfigure */
+			if (vid == vlan->vid)
+				continue;
+
+			ksz8_from_vlan(dev, dev->vlan_cache[vid].table[0],
+				       &fid, &member, &valid);
+			if (valid && (member & BIT(port)))
+				return -EINVAL;
+		}
+
+		ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
+		p->remove_tag = untagged;
+	}
 
 	ksz8_r_vlan_table(dev, vlan->vid, &data);
 	ksz8_from_vlan(dev, data, &fid, &member, &valid);
@@ -1160,9 +1203,11 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
 		u16 vid;
 
 		ksz_pread16(dev, port, REG_PORT_CTRL_VID, &vid);
-		vid &= 0xfff;
+		vid &= ~VLAN_VID_MASK;
 		vid |= new_pvid;
 		ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, vid);
+
+		ksz8_port_enable_pvid(dev, port, true);
 	}
 
 	return 0;
@@ -1171,9 +1216,8 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
 static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
 			      const struct switchdev_obj_port_vlan *vlan)
 {
-	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
 	struct ksz_device *dev = ds->priv;
-	u16 data, pvid, new_pvid = 0;
+	u16 data, pvid;
 	u8 fid, member, valid;
 
 	if (ksz_is_ksz88x3(dev))
@@ -1182,8 +1226,6 @@ static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
 	ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid);
 	pvid = pvid & 0xFFF;
 
-	ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
-
 	ksz8_r_vlan_table(dev, vlan->vid, &data);
 	ksz8_from_vlan(dev, data, &fid, &member, &valid);
 
@@ -1195,14 +1237,11 @@ static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
 		valid = 0;
 	}
 
-	if (pvid == vlan->vid)
-		new_pvid = 1;
-
 	ksz8_to_vlan(dev, fid, member, valid, &data);
 	ksz8_w_vlan_table(dev, vlan->vid, data);
 
-	if (new_pvid != pvid)
-		ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, pvid);
+	if (pvid == vlan->vid)
+		ksz8_port_enable_pvid(dev, port, false);
 
 	return 0;
 }
@@ -1435,6 +1474,9 @@ static int ksz8_setup(struct dsa_switch *ds)
 
 	ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
 
+	if (!ksz_is_ksz88x3(dev))
+		ksz_cfg(dev, REG_SW_CTRL_19, SW_INS_TAG_ENABLE, true);
+
 	/* set broadcast storm protection 10% rate */
 	regmap_update_bits(dev->regmap[1], S_REPLACE_VID_CTRL,
 			   BROADCAST_STORM_RATE,
@@ -1717,6 +1759,16 @@ static int ksz8_switch_init(struct ksz_device *dev)
 	/* set the real number of ports */
 	dev->ds->num_ports = dev->port_cnt;
 
+	/* We rely on software untagging on the CPU port, so that we
+	 * can support both tagged and untagged VLANs
+	 */
+	dev->ds->untag_bridge_pvid = true;
+
+	/* VLAN filtering is partly controlled by the global VLAN
+	 * Enable flag
+	 */
+	dev->ds->vlan_filtering_is_global = true;
+
 	return 0;
 }
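
Note: the ksz8795 block above bundles several VLAN fixes. The hardware VLAN table always packs four entries per table address, so the indexing must use the constant 4 rather than phy_port_cnt; ingress VID filtering is now toggled per port alongside the global VLAN enable; PVID tag insertion gets a dedicated ksz8_port_enable_pvid() helper, using a KSZ88x3-specific register defined in the following hunk; and because the Remove Tag flag is a single per-port bit, adds that would flip it while other VLANs are still configured on the port are rejected with -EINVAL, with software untagging on the CPU port (untag_bridge_pvid) covering the mixed tagged/untagged case.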
@@ -631,6 +631,10 @@
 #define REG_PORT_4_OUT_RATE_3		0xEE
 #define REG_PORT_5_OUT_RATE_3		0xFE
 
+/* 88x3 specific */
+
+#define REG_SW_INSERT_SRC_PVID		0xC2
+
 /* PME */
 
 #define SW_PME_OUTPUT_ENABLE		BIT(1)
 
@@ -27,6 +27,7 @@ struct ksz_port_mib {
 struct ksz_port {
 	u16 member;
 	u16 vid_member;
+	bool remove_tag;		/* Remove Tag flag set, for ksz8795 only */
 	int stp_state;
 	struct phy_device phydev;
 
|
|||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
ret = regmap_bulk_read(dev->regmap[2], reg, value, 2);
|
ret = regmap_bulk_read(dev->regmap[2], reg, value, 2);
|
||||||
if (!ret) {
|
if (!ret)
|
||||||
/* Ick! ToDo: Add 64bit R/W to regmap on 32bit systems */
|
*val = (u64)value[0] << 32 | value[1];
|
||||||
value[0] = swab32(value[0]);
|
|
||||||
value[1] = swab32(value[1]);
|
|
||||||
*val = swab64((u64)*value);
|
|
||||||
}
|
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
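
Note: the fixed code simply treats value[0] as the most-significant word of the 64-bit result, which is independent of host endianness; the old swab32/swab64 sequence appears to have byte-swapped data that regmap had already put into host order. A standalone model of the composition (plain C, names are mine):

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    /* Compose a u64 from two host-order 32-bit words, high word first. */
    static uint64_t compose64(const uint32_t value[2])
    {
            return (uint64_t)value[0] << 32 | value[1];
    }

    int main(void)
    {
            uint32_t v[2] = { 0x11223344, 0x55667788 };

            printf("0x%016" PRIx64 "\n", compose64(v)); /* 0x1122334455667788 */
            return 0;
    }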
@@ -47,6 +47,7 @@ static const struct mt7530_mib_desc mt7530_mib[] = {
 	MIB_DESC(2, 0x48, "TxBytes"),
 	MIB_DESC(1, 0x60, "RxDrop"),
 	MIB_DESC(1, 0x64, "RxFiltering"),
+	MIB_DESC(1, 0x68, "RxUnicast"),
 	MIB_DESC(1, 0x6c, "RxMulticast"),
 	MIB_DESC(1, 0x70, "RxBroadcast"),
 	MIB_DESC(1, 0x74, "RxAlignErr"),
@@ -101,6 +101,23 @@
 	 AR9331_SW_PORT_STATUS_RX_FLOW_EN | AR9331_SW_PORT_STATUS_TX_FLOW_EN | \
 	 AR9331_SW_PORT_STATUS_SPEED_M)
 
+#define AR9331_SW_REG_PORT_CTRL(_port)			(0x104 + (_port) * 0x100)
+#define AR9331_SW_PORT_CTRL_HEAD_EN			BIT(11)
+#define AR9331_SW_PORT_CTRL_PORT_STATE			GENMASK(2, 0)
+#define AR9331_SW_PORT_CTRL_PORT_STATE_DISABLED		0
+#define AR9331_SW_PORT_CTRL_PORT_STATE_BLOCKING		1
+#define AR9331_SW_PORT_CTRL_PORT_STATE_LISTENING	2
+#define AR9331_SW_PORT_CTRL_PORT_STATE_LEARNING		3
+#define AR9331_SW_PORT_CTRL_PORT_STATE_FORWARD		4
+
+#define AR9331_SW_REG_PORT_VLAN(_port)			(0x108 + (_port) * 0x100)
+#define AR9331_SW_PORT_VLAN_8021Q_MODE			GENMASK(31, 30)
+#define AR9331_SW_8021Q_MODE_SECURE			3
+#define AR9331_SW_8021Q_MODE_CHECK			2
+#define AR9331_SW_8021Q_MODE_FALLBACK			1
+#define AR9331_SW_8021Q_MODE_NONE			0
+#define AR9331_SW_PORT_VLAN_PORT_VID_MEMBER		GENMASK(25, 16)
+
 /* MIB registers */
 #define AR9331_MIB_COUNTER(x)			(0x20000 + ((x) * 0x100))
 
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int ar9331_sw_setup_port(struct dsa_switch *ds, int port)
|
||||||
|
{
|
||||||
|
struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
|
||||||
|
struct regmap *regmap = priv->regmap;
|
||||||
|
u32 port_mask, port_ctrl, val;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
/* Generate default port settings */
|
||||||
|
port_ctrl = FIELD_PREP(AR9331_SW_PORT_CTRL_PORT_STATE,
|
||||||
|
AR9331_SW_PORT_CTRL_PORT_STATE_FORWARD);
|
||||||
|
|
||||||
|
if (dsa_is_cpu_port(ds, port)) {
|
||||||
|
/* CPU port should be allowed to communicate with all user
|
||||||
|
* ports.
|
||||||
|
*/
|
||||||
|
port_mask = dsa_user_ports(ds);
|
||||||
|
/* Enable Atheros header on CPU port. This will allow us
|
||||||
|
* communicate with each port separately
|
||||||
|
*/
|
||||||
|
port_ctrl |= AR9331_SW_PORT_CTRL_HEAD_EN;
|
||||||
|
} else if (dsa_is_user_port(ds, port)) {
|
||||||
|
/* User ports should communicate only with the CPU port.
|
||||||
|
*/
|
||||||
|
port_mask = BIT(dsa_upstream_port(ds, port));
|
||||||
|
} else {
|
||||||
|
/* Other ports do not need to communicate at all */
|
||||||
|
port_mask = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
val = FIELD_PREP(AR9331_SW_PORT_VLAN_8021Q_MODE,
|
||||||
|
AR9331_SW_8021Q_MODE_NONE) |
|
||||||
|
FIELD_PREP(AR9331_SW_PORT_VLAN_PORT_VID_MEMBER, port_mask);
|
||||||
|
|
||||||
|
ret = regmap_write(regmap, AR9331_SW_REG_PORT_VLAN(port), val);
|
||||||
|
if (ret)
|
||||||
|
goto error;
|
||||||
|
|
||||||
|
ret = regmap_write(regmap, AR9331_SW_REG_PORT_CTRL(port), port_ctrl);
|
||||||
|
if (ret)
|
||||||
|
goto error;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
error:
|
||||||
|
dev_err(priv->dev, "%s: error: %i\n", __func__, ret);
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
static int ar9331_sw_setup(struct dsa_switch *ds)
|
static int ar9331_sw_setup(struct dsa_switch *ds)
|
||||||
{
|
{
|
||||||
struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
|
struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
|
||||||
struct regmap *regmap = priv->regmap;
|
struct regmap *regmap = priv->regmap;
|
||||||
int ret;
|
int ret, i;
|
||||||
|
|
||||||
ret = ar9331_sw_reset(priv);
|
ret = ar9331_sw_reset(priv);
|
||||||
if (ret)
|
if (ret)
|
||||||
@ -402,6 +467,12 @@ static int ar9331_sw_setup(struct dsa_switch *ds)
|
|||||||
if (ret)
|
if (ret)
|
||||||
goto error;
|
goto error;
|
||||||
|
|
||||||
|
for (i = 0; i < ds->num_ports; i++) {
|
||||||
|
ret = ar9331_sw_setup_port(ds, i);
|
||||||
|
if (ret)
|
||||||
|
goto error;
|
||||||
|
}
|
||||||
|
|
||||||
ds->configure_vlan_while_not_filtering = false;
|
ds->configure_vlan_while_not_filtering = false;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
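
Note: the new ar9331_sw_setup_port() expresses port isolation purely through each port's VID_MEMBER mask: the CPU port may reach all user ports (and gets the Atheros header enabled so frames can be steered per port), each user port may reach only its upstream CPU port, and unused ports get an empty mask. With 802.1Q mode left at NONE, that mask is the only forwarding policy in effect.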
@@ -1635,7 +1635,9 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
 		/* We need to hide the dsa_8021q VLANs from the user. */
 		if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
 			l2_lookup.vlanid = 0;
-		cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
+		rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
+		if (rc)
+			return rc;
 	}
 	return 0;
 }
@@ -3185,6 +3187,7 @@ static void sja1105_teardown(struct dsa_switch *ds)
 	}
 
 	sja1105_devlink_teardown(ds);
+	sja1105_mdiobus_unregister(ds);
 	sja1105_flower_teardown(ds);
 	sja1105_tas_teardown(ds);
 	sja1105_ptp_clock_unregister(ds);
Some files were not shown because too many files have changed in this diff.