Mirror of https://github.com/Qortal/Brooklyn.git (synced 2025-01-30 14:52:17 +00:00)

commit 159f821fa8
parent 0fab2cb14e

    Mike looks like T3Q drew something wit his dick
@@ -22,9 +22,14 @@ typedef __builtin_va_list va_list;
#define va_arg(v, l) __builtin_va_arg(v, l)
#define va_copy(d, s) __builtin_va_copy(d, s)
#else
#ifdef __KERNEL__
#include <linux/stdarg.h>
#endif
#endif
#else
/* Used to build acpi tools */
#include <stdarg.h>
#endif /* __KERNEL__ */
#endif /* ACPI_USE_BUILTIN_STDARG */
#endif /* ! va_arg */

#define ACPI_INLINE __inline__
@@ -929,8 +929,11 @@ struct bpf_array_aux {
* stored in the map to make sure that all callers and callees have
* the same prog type and JITed flag.
*/
enum bpf_prog_type type;
bool jited;
struct {
spinlock_t lock;
enum bpf_prog_type type;
bool jited;
} owner;
/* Programs with direct jumps into programs part of this array. */
struct list_head poke_progs;
struct bpf_map *map;
@@ -72,6 +72,8 @@ enum cpuhp_state {
CPUHP_SLUB_DEAD,
CPUHP_DEBUG_OBJ_DEAD,
CPUHP_MM_WRITEBACK_DEAD,
/* Must be after CPUHP_MM_VMSTAT_DEAD */
CPUHP_MM_DEMOTION_DEAD,
CPUHP_MM_VMSTAT_DEAD,
CPUHP_SOFTIRQ_DEAD,
CPUHP_NET_MVNETA_DEAD,
@@ -240,6 +242,8 @@ enum cpuhp_state {
CPUHP_AP_BASE_CACHEINFO_ONLINE,
CPUHP_AP_ONLINE_DYN,
CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
/* Must be after CPUHP_AP_ONLINE_DYN for node_states[N_CPU] update */
CPUHP_AP_MM_DEMOTION_ONLINE,
CPUHP_AP_X86_HPET_ONLINE,
CPUHP_AP_X86_KVM_CLK_ONLINE,
CPUHP_AP_DTPM_CPU_ONLINE,
@@ -109,7 +109,7 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg
#endif
}

#if defined(CONFIG_UM) || defined(CONFIG_IA64)
#if (defined(CONFIG_UML) && defined(CONFIG_X86_32)) || defined(CONFIG_IA64)
/*
* These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
* extra segments containing the gate DSO contents. Dumping its
@@ -1051,6 +1051,7 @@ extern int bpf_jit_enable;
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;
extern long bpf_jit_limit;
extern long bpf_jit_limit_max;

typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
@@ -160,7 +160,10 @@ int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
#define register_hotmemory_notifier(nb) register_memory_notifier(nb)
#define unregister_hotmemory_notifier(nb) unregister_memory_notifier(nb)
#else
#define hotplug_memory_notifier(fn, pri) ({ 0; })
static inline int hotplug_memory_notifier(notifier_fn_t fn, int pri)
{
return 0;
}
/* These aren't inline functions due to a GCC bug. */
#define register_hotmemory_notifier(nb) ({ (void)(nb); 0; })
#define unregister_hotmemory_notifier(nb) ({ (void)(nb); })
@@ -1138,7 +1138,6 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
@@ -171,6 +171,15 @@ enum pageflags {
/* Compound pages. Stored in first tail page's flags */
PG_double_map = PG_workingset,

#ifdef CONFIG_MEMORY_FAILURE
/*
* Compound pages. Stored in first tail page's flags.
* Indicates that at least one subpage is hwpoisoned in the
* THP.
*/
PG_has_hwpoisoned = PG_mappedtodisk,
#endif

/* non-lru isolated movable page */
PG_isolated = PG_reclaim,
@@ -668,6 +677,20 @@ PAGEFLAG_FALSE(DoubleMap)
TESTSCFLAG_FALSE(DoubleMap)
#endif

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
* PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
* compound page.
*
* This flag is set by hwpoison handler. Cleared by THP split or free page.
*/
PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
#else
PAGEFLAG_FALSE(HasHWPoisoned)
TESTSCFLAG_FALSE(HasHWPoisoned)
#endif

/*
* Check if a page is currently marked HWPoisoned. Note that this check is
* best effort only and inherently racy: there is no way to synchronize with
@@ -23,7 +23,7 @@ static inline bool page_is_secretmem(struct page *page)
mapping = (struct address_space *)
((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);

if (mapping != page->mapping)
if (!mapping || mapping != page->mapping)
return false;

return mapping->a_ops == &secretmem_aops;
@@ -128,6 +128,7 @@ int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
struct sk_msg *msg, u32 bytes);
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
int len, int flags);
bool sk_msg_is_readable(struct sock *sk);

static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
{
@@ -16,23 +16,8 @@
* When function tracing occurs, the following steps are made:
* If arch does not support a ftrace feature:
* call internal function (uses INTERNAL bits) which calls...
* If callback is registered to the "global" list, the list
* function is called and recursion checks the GLOBAL bits.
* then this function calls...
* The function callback, which can use the FTRACE bits to
* check for recursion.
*
* Now if the arch does not support a feature, and it calls
* the global list function which calls the ftrace callback
* all three of these steps will do a recursion protection.
* There's no reason to do one if the previous caller already
* did. The recursion that we are protecting against will
* go through the same steps again.
*
* To prevent the multiple recursion checks, if a recursion
* bit is set that is higher than the MAX bit of the current
* check, then we know that the check was made by the previous
* caller, and we can skip the current check.
*/
enum {
/* Function recursion bits */
@@ -40,12 +25,14 @@ enum {
TRACE_FTRACE_NMI_BIT,
TRACE_FTRACE_IRQ_BIT,
TRACE_FTRACE_SIRQ_BIT,
TRACE_FTRACE_TRANSITION_BIT,

/* INTERNAL_BITs must be greater than FTRACE_BITs */
/* Internal use recursion bits */
TRACE_INTERNAL_BIT,
TRACE_INTERNAL_NMI_BIT,
TRACE_INTERNAL_IRQ_BIT,
TRACE_INTERNAL_SIRQ_BIT,
TRACE_INTERNAL_TRANSITION_BIT,

TRACE_BRANCH_BIT,
/*
@@ -86,12 +73,6 @@ enum {
*/
TRACE_GRAPH_NOTRACE_BIT,

/*
* When transitioning between context, the preempt_count() may
* not be correct. Allow for a single recursion to cover this case.
*/
TRACE_TRANSITION_BIT,

/* Used to prevent recursion recording from recursing. */
TRACE_RECORD_RECURSION_BIT,
};
@@ -113,12 +94,10 @@ enum {
#define TRACE_CONTEXT_BITS 4

#define TRACE_FTRACE_START TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK TRACE_LIST_MAX
#define TRACE_CONTEXT_MASK ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

/*
* Used for setting context
@@ -132,6 +111,7 @@ enum {
TRACE_CTX_IRQ,
TRACE_CTX_SOFTIRQ,
TRACE_CTX_NORMAL,
TRACE_CTX_TRANSITION,
};

static __always_inline int trace_get_context_bit(void)
@@ -160,45 +140,34 @@ extern void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip);
#endif

static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsigned long pip,
int start, int max)
int start)
{
unsigned int val = READ_ONCE(current->trace_recursion);
int bit;

/* A previous recursion check was made */
if ((val & TRACE_CONTEXT_MASK) > max)
return 0;

bit = trace_get_context_bit() + start;
if (unlikely(val & (1 << bit))) {
/*
* It could be that preempt_count has not been updated during
* a switch between contexts. Allow for a single recursion.
*/
bit = TRACE_TRANSITION_BIT;
bit = TRACE_CTX_TRANSITION + start;
if (val & (1 << bit)) {
do_ftrace_record_recursion(ip, pip);
return -1;
}
} else {
/* Normal check passed, clear the transition to allow it again */
val &= ~(1 << TRACE_TRANSITION_BIT);
}

val |= 1 << bit;
current->trace_recursion = val;
barrier();

return bit + 1;
return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
if (!bit)
return;

barrier();
bit--;
trace_recursion_clear(bit);
}
@@ -214,7 +183,7 @@ static __always_inline void trace_clear_recursion(int bit)
static __always_inline int ftrace_test_recursion_trylock(unsigned long ip,
unsigned long parent_ip)
{
return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START, TRACE_FTRACE_MAX);
return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START);
}

/**
@@ -127,6 +127,8 @@ static inline long get_ucounts_value(struct ucounts *ucounts, enum ucount_type t

long inc_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v);
bool dec_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v);
long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum ucount_type type);
void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum ucount_type type);
bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max);

static inline void set_rlimit_ucount_max(struct user_namespace *ns,
@@ -10,6 +10,7 @@
#define __UAPI_MCTP_H

#include <linux/types.h>
#include <linux/socket.h>

typedef __u8 mctp_eid_t;
@@ -18,11 +19,13 @@ struct mctp_addr {
};

struct sockaddr_mctp {
unsigned short int smctp_family;
int smctp_network;
__kernel_sa_family_t smctp_family;
__u16 __smctp_pad0;
unsigned int smctp_network;
struct mctp_addr smctp_addr;
__u8 smctp_type;
__u8 smctp_tag;
__u8 __smctp_pad1;
};

#define MCTP_NET_ANY 0x0
@@ -657,7 +657,7 @@ static int audit_filter_rules(struct task_struct *tsk,
result = audit_comparator(audit_loginuid_set(tsk), f->op, f->val);
break;
case AUDIT_SADDR_FAM:
if (ctx->sockaddr)
if (ctx && ctx->sockaddr)
result = audit_comparator(ctx->sockaddr->ss_family,
f->op, f->val);
break;
@@ -1072,6 +1072,7 @@ static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
INIT_WORK(&aux->work, prog_array_map_clear_deferred);
INIT_LIST_HEAD(&aux->poke_progs);
mutex_init(&aux->poke_mutex);
spin_lock_init(&aux->owner.lock);

map = array_map_alloc(attr);
if (IS_ERR(map)) {
@@ -524,6 +524,7 @@ int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_harden __read_mostly;
long bpf_jit_limit __read_mostly;
long bpf_jit_limit_max __read_mostly;

static void
bpf_prog_ksym_set_addr(struct bpf_prog *prog)
@@ -817,7 +818,8 @@ u64 __weak bpf_jit_alloc_exec_limit(void)
static int __init bpf_jit_charge_init(void)
{
/* Only used as heuristic here to derive limit. */
bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2,
PAGE_SIZE), LONG_MAX);
return 0;
}
@@ -1821,20 +1823,26 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx,
bool bpf_prog_array_compatible(struct bpf_array *array,
const struct bpf_prog *fp)
{
bool ret;

if (fp->kprobe_override)
return false;

if (!array->aux->type) {
spin_lock(&array->aux->owner.lock);

if (!array->aux->owner.type) {
/* There's no owner yet where we could check for
* compatibility.
*/
array->aux->type = fp->type;
array->aux->jited = fp->jited;
return true;
array->aux->owner.type = fp->type;
array->aux->owner.jited = fp->jited;
ret = true;
} else {
ret = array->aux->owner.type == fp->type &&
array->aux->owner.jited == fp->jited;
}

return array->aux->type == fp->type &&
array->aux->jited == fp->jited;
spin_unlock(&array->aux->owner.lock);
return ret;
}

static int bpf_check_tail_call(const struct bpf_prog *fp)
@@ -543,8 +543,10 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)

if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
array = container_of(map, struct bpf_array, map);
type = array->aux->type;
jited = array->aux->jited;
spin_lock(&array->aux->owner.lock);
type = array->aux->owner.type;
jited = array->aux->owner.jited;
spin_unlock(&array->aux->owner.lock);
}

seq_printf(m,
@@ -1337,12 +1339,11 @@ int generic_map_update_batch(struct bpf_map *map,
void __user *values = u64_to_user_ptr(attr->batch.values);
void __user *keys = u64_to_user_ptr(attr->batch.keys);
u32 value_size, cp, max_count;
int ufd = attr->map_fd;
int ufd = attr->batch.map_fd;
void *key, *value;
struct fd f;
int err = 0;

f = fdget(ufd);
if (attr->batch.elem_flags & ~BPF_F_LOCK)
return -EINVAL;
@@ -1367,6 +1368,7 @@ int generic_map_update_batch(struct bpf_map *map,
return -ENOMEM;
}

f = fdget(ufd); /* bpf_map_do_batch() guarantees ufd is valid */
for (cp = 0; cp < max_count; cp++) {
err = -EFAULT;
if (copy_from_user(key, keys + cp * map->key_size,
@@ -1386,6 +1388,7 @@ int generic_map_update_batch(struct bpf_map *map,

kvfree(value);
kvfree(key);
fdput(f);
return err;
}
@@ -2187,8 +2187,10 @@ static void cgroup_kill_sb(struct super_block *sb)
* And don't kill the default root.
*/
if (list_empty(&root->cgrp.self.children) && root != &cgrp_dfl_root &&
!percpu_ref_is_dying(&root->cgrp.self.refcnt))
!percpu_ref_is_dying(&root->cgrp.self.refcnt)) {
cgroup_bpf_offline(&root->cgrp);
percpu_ref_kill(&root->cgrp.self.refcnt);
}
cgroup_put(&root->cgrp);
kernfs_kill_sb(sb);
}
@@ -225,8 +225,6 @@ struct cred *cred_alloc_blank(void)
#ifdef CONFIG_DEBUG_CREDENTIALS
new->magic = CRED_MAGIC;
#endif
new->ucounts = get_ucounts(&init_ucounts);

if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
goto error;
@@ -501,7 +499,7 @@ int commit_creds(struct cred *new)
inc_rlimit_ucounts(new->ucounts, UCOUNT_RLIMIT_NPROC, 1);
rcu_assign_pointer(task->real_cred, new);
rcu_assign_pointer(task->cred, new);
if (new->user != old->user)
if (new->user != old->user || new->user_ns != old->user_ns)
dec_rlimit_ucounts(old->ucounts, UCOUNT_RLIMIT_NPROC, 1);
alter_cred_subscribers(old, -2);
@@ -669,7 +667,7 @@ int set_cred_ucounts(struct cred *new)
{
struct task_struct *task = current;
const struct cred *old = task->real_cred;
struct ucounts *old_ucounts = new->ucounts;
struct ucounts *new_ucounts, *old_ucounts = new->ucounts;

if (new->user == old->user && new->user_ns == old->user_ns)
return 0;
@@ -681,9 +679,10 @@ int set_cred_ucounts(struct cred *new)
if (old_ucounts && old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->euid))
return 0;

if (!(new->ucounts = alloc_ucounts(new->user_ns, new->euid)))
if (!(new_ucounts = alloc_ucounts(new->user_ns, new->euid)))
return -EAGAIN;

new->ucounts = new_ucounts;
if (old_ucounts)
put_ucounts(old_ucounts);
@@ -552,7 +552,7 @@ static void active_cacheline_remove(struct dma_debug_entry *entry)
|
||||
* Wrapper function for adding an entry to the hash.
|
||||
* This function takes care of locking itself.
|
||||
*/
|
||||
static void add_dma_entry(struct dma_debug_entry *entry)
|
||||
static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
|
||||
{
|
||||
struct hash_bucket *bucket;
|
||||
unsigned long flags;
|
||||
@ -566,7 +566,7 @@ static void add_dma_entry(struct dma_debug_entry *entry)
|
||||
if (rc == -ENOMEM) {
|
||||
pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
|
||||
global_disable = true;
|
||||
} else if (rc == -EEXIST) {
|
||||
} else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
|
||||
err_printk(entry->dev, entry,
|
||||
"cacheline tracking EEXIST, overlapping mappings aren't supported\n");
|
||||
}
|
||||
@ -1191,7 +1191,8 @@ void debug_dma_map_single(struct device *dev, const void *addr,
|
||||
EXPORT_SYMBOL(debug_dma_map_single);
|
||||
|
||||
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
|
||||
size_t size, int direction, dma_addr_t dma_addr)
|
||||
size_t size, int direction, dma_addr_t dma_addr,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct dma_debug_entry *entry;
|
||||
|
||||
@ -1222,7 +1223,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
|
||||
check_for_illegal_area(dev, addr, size);
|
||||
}
|
||||
|
||||
add_dma_entry(entry);
|
||||
add_dma_entry(entry, attrs);
|
||||
}
|
||||
|
||||
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||
@ -1280,7 +1281,8 @@ void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
|
||||
}
|
||||
|
||||
void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
|
||||
int nents, int mapped_ents, int direction)
|
||||
int nents, int mapped_ents, int direction,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct dma_debug_entry *entry;
|
||||
struct scatterlist *s;
|
||||
@ -1289,6 +1291,12 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
|
||||
if (unlikely(dma_debug_disabled()))
|
||||
return;
|
||||
|
||||
for_each_sg(sg, s, nents, i) {
|
||||
check_for_stack(dev, sg_page(s), s->offset);
|
||||
if (!PageHighMem(sg_page(s)))
|
||||
check_for_illegal_area(dev, sg_virt(s), s->length);
|
||||
}
|
||||
|
||||
for_each_sg(sg, s, mapped_ents, i) {
|
||||
entry = dma_entry_alloc();
|
||||
if (!entry)
|
||||
@ -1304,15 +1312,9 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
|
||||
entry->sg_call_ents = nents;
|
||||
entry->sg_mapped_ents = mapped_ents;
|
||||
|
||||
check_for_stack(dev, sg_page(s), s->offset);
|
||||
|
||||
if (!PageHighMem(sg_page(s))) {
|
||||
check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
|
||||
}
|
||||
|
||||
check_sg_segment(dev, s);
|
||||
|
||||
add_dma_entry(entry);
|
||||
add_dma_entry(entry, attrs);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1368,7 +1370,8 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
|
||||
}
|
||||
|
||||
void debug_dma_alloc_coherent(struct device *dev, size_t size,
|
||||
dma_addr_t dma_addr, void *virt)
|
||||
dma_addr_t dma_addr, void *virt,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct dma_debug_entry *entry;
|
||||
|
||||
@ -1398,7 +1401,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
|
||||
else
|
||||
entry->pfn = page_to_pfn(virt_to_page(virt));
|
||||
|
||||
add_dma_entry(entry);
|
||||
add_dma_entry(entry, attrs);
|
||||
}
|
||||
|
||||
void debug_dma_free_coherent(struct device *dev, size_t size,
|
||||
@ -1429,7 +1432,8 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
|
||||
}
|
||||
|
||||
void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
|
||||
int direction, dma_addr_t dma_addr)
|
||||
int direction, dma_addr_t dma_addr,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct dma_debug_entry *entry;
|
||||
|
||||
@ -1449,7 +1453,7 @@ void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
|
||||
entry->direction = direction;
|
||||
entry->map_err_type = MAP_ERR_NOT_CHECKED;
|
||||
|
||||
add_dma_entry(entry);
|
||||
add_dma_entry(entry, attrs);
|
||||
}
|
||||
|
||||
void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
|
||||
|
@ -11,26 +11,30 @@
|
||||
#ifdef CONFIG_DMA_API_DEBUG
|
||||
extern void debug_dma_map_page(struct device *dev, struct page *page,
|
||||
size_t offset, size_t size,
|
||||
int direction, dma_addr_t dma_addr);
|
||||
int direction, dma_addr_t dma_addr,
|
||||
unsigned long attrs);
|
||||
|
||||
extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
|
||||
size_t size, int direction);
|
||||
|
||||
extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
|
||||
int nents, int mapped_ents, int direction);
|
||||
int nents, int mapped_ents, int direction,
|
||||
unsigned long attrs);
|
||||
|
||||
extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
|
||||
int nelems, int dir);
|
||||
|
||||
extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
|
||||
dma_addr_t dma_addr, void *virt);
|
||||
dma_addr_t dma_addr, void *virt,
|
||||
unsigned long attrs);
|
||||
|
||||
extern void debug_dma_free_coherent(struct device *dev, size_t size,
|
||||
void *virt, dma_addr_t addr);
|
||||
|
||||
extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
|
||||
size_t size, int direction,
|
||||
dma_addr_t dma_addr);
|
||||
dma_addr_t dma_addr,
|
||||
unsigned long attrs);
|
||||
|
||||
extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
|
||||
size_t size, int direction);
|
||||
@ -53,7 +57,8 @@ extern void debug_dma_sync_sg_for_device(struct device *dev,
|
||||
#else /* CONFIG_DMA_API_DEBUG */
|
||||
static inline void debug_dma_map_page(struct device *dev, struct page *page,
|
||||
size_t offset, size_t size,
|
||||
int direction, dma_addr_t dma_addr)
|
||||
int direction, dma_addr_t dma_addr,
|
||||
unsigned long attrs)
|
||||
{
|
||||
}
|
||||
|
||||
@ -63,7 +68,8 @@ static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
|
||||
}
|
||||
|
||||
static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
|
||||
int nents, int mapped_ents, int direction)
|
||||
int nents, int mapped_ents, int direction,
|
||||
unsigned long attrs)
|
||||
{
|
||||
}
|
||||
|
||||
@ -74,7 +80,8 @@ static inline void debug_dma_unmap_sg(struct device *dev,
|
||||
}
|
||||
|
||||
static inline void debug_dma_alloc_coherent(struct device *dev, size_t size,
|
||||
dma_addr_t dma_addr, void *virt)
|
||||
dma_addr_t dma_addr, void *virt,
|
||||
unsigned long attrs)
|
||||
{
|
||||
}
|
||||
|
||||
@ -85,7 +92,8 @@ static inline void debug_dma_free_coherent(struct device *dev, size_t size,
|
||||
|
||||
static inline void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
|
||||
size_t size, int direction,
|
||||
dma_addr_t dma_addr)
|
||||
dma_addr_t dma_addr,
|
||||
unsigned long attrs)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -156,7 +156,7 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
|
||||
addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
|
||||
else
|
||||
addr = ops->map_page(dev, page, offset, size, dir, attrs);
|
||||
debug_dma_map_page(dev, page, offset, size, dir, addr);
|
||||
debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);
|
||||
|
||||
return addr;
|
||||
}
|
||||
@ -195,7 +195,7 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
|
||||
ents = ops->map_sg(dev, sg, nents, dir, attrs);
|
||||
|
||||
if (ents > 0)
|
||||
debug_dma_map_sg(dev, sg, nents, ents, dir);
|
||||
debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
|
||||
else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
|
||||
ents != -EIO))
|
||||
return -EIO;
|
||||
@ -249,12 +249,12 @@ EXPORT_SYMBOL(dma_map_sg_attrs);
|
||||
* Returns 0 on success or a negative error code on error. The following
|
||||
* error codes are supported with the given meaning:
|
||||
*
|
||||
* -EINVAL - An invalid argument, unaligned access or other error
|
||||
* in usage. Will not succeed if retried.
|
||||
* -ENOMEM - Insufficient resources (like memory or IOVA space) to
|
||||
* complete the mapping. Should succeed if retried later.
|
||||
* -EIO - Legacy error code with an unknown meaning. eg. this is
|
||||
* returned if a lower level call returned DMA_MAPPING_ERROR.
|
||||
* -EINVAL An invalid argument, unaligned access or other error
|
||||
* in usage. Will not succeed if retried.
|
||||
* -ENOMEM Insufficient resources (like memory or IOVA space) to
|
||||
* complete the mapping. Should succeed if retried later.
|
||||
* -EIO Legacy error code with an unknown meaning. eg. this is
|
||||
* returned if a lower level call returned DMA_MAPPING_ERROR.
|
||||
*/
|
||||
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
|
||||
enum dma_data_direction dir, unsigned long attrs)
|
||||
@ -305,7 +305,7 @@ dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
|
||||
else if (ops->map_resource)
|
||||
addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
|
||||
|
||||
debug_dma_map_resource(dev, phys_addr, size, dir, addr);
|
||||
debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
|
||||
return addr;
|
||||
}
|
||||
EXPORT_SYMBOL(dma_map_resource);
|
||||
@ -510,7 +510,7 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
|
||||
else
|
||||
return NULL;
|
||||
|
||||
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
|
||||
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
|
||||
return cpu_addr;
|
||||
}
|
||||
EXPORT_SYMBOL(dma_alloc_attrs);
|
||||
@ -566,7 +566,7 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
|
||||
struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);
|
||||
|
||||
if (page)
|
||||
debug_dma_map_page(dev, page, 0, size, dir, *dma_handle);
|
||||
debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
|
||||
return page;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dma_alloc_pages);
|
||||
@ -644,7 +644,7 @@ struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
|
||||
|
||||
if (sgt) {
|
||||
sgt->nents = 1;
|
||||
debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir);
|
||||
debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
|
||||
}
|
||||
return sgt;
|
||||
}
|
||||
|
@ -8795,6 +8795,7 @@ void idle_task_exit(void)
|
||||
finish_arch_post_lock_switch();
|
||||
}
|
||||
|
||||
scs_task_reset(current);
|
||||
/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
|
||||
}
|
||||
|
||||
|
@ -426,22 +426,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
|
||||
*/
|
||||
rcu_read_lock();
|
||||
ucounts = task_ucounts(t);
|
||||
sigpending = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
|
||||
switch (sigpending) {
|
||||
case 1:
|
||||
if (likely(get_ucounts(ucounts)))
|
||||
break;
|
||||
fallthrough;
|
||||
case LONG_MAX:
|
||||
/*
|
||||
* we need to decrease the ucount in the userns tree on any
|
||||
* failure to avoid counts leaking.
|
||||
*/
|
||||
dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
|
||||
rcu_read_unlock();
|
||||
return NULL;
|
||||
}
|
||||
sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
|
||||
rcu_read_unlock();
|
||||
if (!sigpending)
|
||||
return NULL;
|
||||
|
||||
if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
|
||||
q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
|
||||
@ -450,8 +438,7 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
|
||||
}
|
||||
|
||||
if (unlikely(q == NULL)) {
|
||||
if (dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1))
|
||||
put_ucounts(ucounts);
|
||||
dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
|
||||
} else {
|
||||
INIT_LIST_HEAD(&q->list);
|
||||
q->flags = sigqueue_flags;
|
||||
@ -464,8 +451,8 @@ static void __sigqueue_free(struct sigqueue *q)
|
||||
{
|
||||
if (q->flags & SIGQUEUE_PREALLOC)
|
||||
return;
|
||||
if (q->ucounts && dec_rlimit_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING, 1)) {
|
||||
put_ucounts(q->ucounts);
|
||||
if (q->ucounts) {
|
||||
dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
|
||||
q->ucounts = NULL;
|
||||
}
|
||||
kmem_cache_free(sigqueue_cachep, q);
|
||||
|
@ -2208,7 +2208,7 @@ static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
|
||||
}
|
||||
|
||||
/**
|
||||
* ftrace_update_record, set a record that now is tracing or not
|
||||
* ftrace_update_record - set a record that now is tracing or not
|
||||
* @rec: the record to update
|
||||
* @enable: set to true if the record is tracing, false to force disable
|
||||
*
|
||||
@ -2221,7 +2221,7 @@ int ftrace_update_record(struct dyn_ftrace *rec, bool enable)
|
||||
}
|
||||
|
||||
/**
|
||||
* ftrace_test_record, check if the record has been enabled or not
|
||||
* ftrace_test_record - check if the record has been enabled or not
|
||||
* @rec: the record to test
|
||||
* @enable: set to true to check if enabled, false if it is disabled
|
||||
*
|
||||
@ -2574,7 +2574,7 @@ struct ftrace_rec_iter {
|
||||
};
|
||||
|
||||
/**
|
||||
* ftrace_rec_iter_start, start up iterating over traced functions
|
||||
* ftrace_rec_iter_start - start up iterating over traced functions
|
||||
*
|
||||
* Returns an iterator handle that is used to iterate over all
|
||||
* the records that represent address locations where functions
|
||||
@ -2605,7 +2605,7 @@ struct ftrace_rec_iter *ftrace_rec_iter_start(void)
|
||||
}
|
||||
|
||||
/**
|
||||
* ftrace_rec_iter_next, get the next record to process.
|
||||
* ftrace_rec_iter_next - get the next record to process.
|
||||
* @iter: The handle to the iterator.
|
||||
*
|
||||
* Returns the next iterator after the given iterator @iter.
|
||||
@ -2630,7 +2630,7 @@ struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
|
||||
}
|
||||
|
||||
/**
|
||||
* ftrace_rec_iter_record, get the record at the iterator location
|
||||
* ftrace_rec_iter_record - get the record at the iterator location
|
||||
* @iter: The current iterator location
|
||||
*
|
||||
* Returns the record that the current @iter is at.
|
||||
@ -2733,7 +2733,7 @@ static int __ftrace_modify_code(void *data)
|
||||
}
|
||||
|
||||
/**
|
||||
* ftrace_run_stop_machine, go back to the stop machine method
|
||||
* ftrace_run_stop_machine - go back to the stop machine method
|
||||
* @command: The command to tell ftrace what to do
|
||||
*
|
||||
* If an arch needs to fall back to the stop machine method, the
|
||||
@ -2745,7 +2745,7 @@ void ftrace_run_stop_machine(int command)
|
||||
}
|
||||
|
||||
/**
|
||||
* arch_ftrace_update_code, modify the code to trace or not trace
|
||||
* arch_ftrace_update_code - modify the code to trace or not trace
|
||||
* @command: The command that needs to be done
|
||||
*
|
||||
* Archs can override this function if it does not need to
|
||||
@ -6977,7 +6977,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
|
||||
struct ftrace_ops *op;
|
||||
int bit;
|
||||
|
||||
bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START, TRACE_LIST_MAX);
|
||||
bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
|
||||
if (bit < 0)
|
||||
return;
|
||||
|
||||
@ -7052,7 +7052,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
|
||||
{
|
||||
int bit;
|
||||
|
||||
bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START, TRACE_LIST_MAX);
|
||||
bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
|
||||
if (bit < 0)
|
||||
return;
|
||||
|
||||
@ -7525,7 +7525,9 @@ void ftrace_kill(void)
|
||||
}
|
||||
|
||||
/**
|
||||
* Test if ftrace is dead or not.
|
||||
* ftrace_is_dead - Test if ftrace is dead or not.
|
||||
*
|
||||
* Returns 1 if ftrace is "dead", zero otherwise.
|
||||
*/
|
||||
int ftrace_is_dead(void)
|
||||
{
|
||||
|
@ -904,8 +904,8 @@ static int __trace_eprobe_create(int argc, const char *argv[])
|
||||
|
||||
if (IS_ERR(ep)) {
|
||||
ret = PTR_ERR(ep);
|
||||
/* This must return -ENOMEM, else there is a bug */
|
||||
WARN_ON_ONCE(ret != -ENOMEM);
|
||||
/* This must return -ENOMEM or missing event, else there is a bug */
|
||||
WARN_ON_ONCE(ret != -ENOMEM && ret != -ENODEV);
|
||||
ep = NULL;
|
||||
goto error;
|
||||
}
|
||||
|
@ -284,6 +284,55 @@ bool dec_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v)
|
||||
return (new == 0);
|
||||
}
|
||||
|
||||
static void do_dec_rlimit_put_ucounts(struct ucounts *ucounts,
|
||||
struct ucounts *last, enum ucount_type type)
|
||||
{
|
||||
struct ucounts *iter, *next;
|
||||
for (iter = ucounts; iter != last; iter = next) {
|
||||
long dec = atomic_long_add_return(-1, &iter->ucount[type]);
|
||||
WARN_ON_ONCE(dec < 0);
|
||||
next = iter->ns->ucounts;
|
||||
if (dec == 0)
|
||||
put_ucounts(iter);
|
||||
}
|
||||
}
|
||||
|
||||
void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum ucount_type type)
|
||||
{
|
||||
do_dec_rlimit_put_ucounts(ucounts, NULL, type);
|
||||
}
|
||||
|
||||
long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum ucount_type type)
|
||||
{
|
||||
/* Caller must hold a reference to ucounts */
|
||||
struct ucounts *iter;
|
||||
long dec, ret = 0;
|
||||
|
||||
for (iter = ucounts; iter; iter = iter->ns->ucounts) {
|
||||
long max = READ_ONCE(iter->ns->ucount_max[type]);
|
||||
long new = atomic_long_add_return(1, &iter->ucount[type]);
|
||||
if (new < 0 || new > max)
|
||||
goto unwind;
|
||||
if (iter == ucounts)
|
||||
ret = new;
|
||||
/*
|
||||
* Grab an extra ucount reference for the caller when
|
||||
* the rlimit count was previously 0.
|
||||
*/
|
||||
if (new != 1)
|
||||
continue;
|
||||
if (!get_ucounts(iter))
|
||||
goto dec_unwind;
|
||||
}
|
||||
return ret;
|
||||
dec_unwind:
|
||||
dec = atomic_long_add_return(-1, &iter->ucount[type]);
|
||||
WARN_ON_ONCE(dec < 0);
|
||||
unwind:
|
||||
do_dec_rlimit_put_ucounts(ucounts, iter, type);
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max)
|
||||
{
|
||||
struct ucounts *iter;
|
||||
|
@ -2426,6 +2426,8 @@ static void __split_huge_page(struct page *page, struct list_head *list,
|
||||
/* lock lru list/PageCompound, ref frozen by page_ref_freeze */
|
||||
lruvec = lock_page_lruvec(head);
|
||||
|
||||
ClearPageHasHWPoisoned(head);
|
||||
|
||||
for (i = nr - 1; i >= 1; i--) {
|
||||
__split_huge_page_tail(head, i, lruvec, list);
|
||||
/* Some pages can be beyond EOF: drop them from page cache */
|
||||
@ -2700,12 +2702,14 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
|
||||
if (mapping) {
|
||||
int nr = thp_nr_pages(head);
|
||||
|
||||
if (PageSwapBacked(head))
|
||||
if (PageSwapBacked(head)) {
|
||||
__mod_lruvec_page_state(head, NR_SHMEM_THPS,
|
||||
-nr);
|
||||
else
|
||||
} else {
|
||||
__mod_lruvec_page_state(head, NR_FILE_THPS,
|
||||
-nr);
|
||||
filemap_nr_thps_dec(mapping);
|
||||
}
|
||||
}
|
||||
|
||||
__split_huge_page(page, list, end);
|
||||
|
@ -445,22 +445,25 @@ static bool hugepage_vma_check(struct vm_area_struct *vma,
|
||||
if (!transhuge_vma_enabled(vma, vm_flags))
|
||||
return false;
|
||||
|
||||
if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
|
||||
vma->vm_pgoff, HPAGE_PMD_NR))
|
||||
return false;
|
||||
|
||||
/* Enabled via shmem mount options or sysfs settings. */
|
||||
if (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) {
|
||||
return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
|
||||
HPAGE_PMD_NR);
|
||||
}
|
||||
if (shmem_file(vma->vm_file))
|
||||
return shmem_huge_enabled(vma);
|
||||
|
||||
/* THP settings require madvise. */
|
||||
if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
|
||||
return false;
|
||||
|
||||
/* Read-only file mappings need to be aligned for THP to work. */
|
||||
/* Only regular file is valid */
|
||||
if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
|
||||
!inode_is_open_for_write(vma->vm_file->f_inode) &&
|
||||
(vm_flags & VM_EXEC)) {
|
||||
return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
|
||||
HPAGE_PMD_NR);
|
||||
struct inode *inode = vma->vm_file->f_inode;
|
||||
|
||||
return !inode_is_open_for_write(inode) &&
|
||||
S_ISREG(inode->i_mode);
|
||||
}
|
||||
|
||||
if (!vma->anon_vma || vma->vm_ops)
|
||||
@ -1763,6 +1766,10 @@ static void collapse_file(struct mm_struct *mm,
|
||||
filemap_flush(mapping);
|
||||
result = SCAN_FAIL;
|
||||
goto xa_unlocked;
|
||||
} else if (PageWriteback(page)) {
|
||||
xas_unlock_irq(&xas);
|
||||
result = SCAN_FAIL;
|
||||
goto xa_unlocked;
|
||||
} else if (trylock_page(page)) {
|
||||
get_page(page);
|
||||
xas_unlock_irq(&xas);
|
||||
@ -1798,7 +1805,8 @@ static void collapse_file(struct mm_struct *mm,
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
if (!is_shmem && PageDirty(page)) {
|
||||
if (!is_shmem && (PageDirty(page) ||
|
||||
PageWriteback(page))) {
|
||||
/*
|
||||
* khugepaged only works on read-only fd, so this
|
||||
* page is dirty because it hasn't been flushed
|
||||
|
@ -932,16 +932,14 @@ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
|
||||
* covered by the memory map. The struct page representing NOMAP memory
|
||||
* frames in the memory map will be PageReserved()
|
||||
*
|
||||
* Note: if the memory being marked %MEMBLOCK_NOMAP was allocated from
|
||||
* memblock, the caller must inform kmemleak to ignore that memory
|
||||
*
|
||||
* Return: 0 on success, -errno on failure.
|
||||
*/
|
||||
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
|
||||
{
|
||||
int ret = memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
|
||||
|
||||
if (!ret)
|
||||
kmemleak_free_part_phys(base, size);
|
||||
|
||||
return ret;
|
||||
return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1692,7 +1690,7 @@ void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
|
||||
if (!size)
|
||||
return;
|
||||
|
||||
if (memblock.memory.cnt <= 1) {
|
||||
if (!memblock_memory->total_size) {
|
||||
pr_warn("%s: No memory registered yet\n", __func__);
|
||||
return;
|
||||
}
|
||||
|
@ -1147,20 +1147,6 @@ static int __get_hwpoison_page(struct page *page)
|
||||
if (!HWPoisonHandlable(head))
|
||||
return -EBUSY;
|
||||
|
||||
if (PageTransHuge(head)) {
|
||||
/*
|
||||
* Non anonymous thp exists only in allocation/free time. We
|
||||
* can't handle such a case correctly, so let's give it up.
|
||||
* This should be better than triggering BUG_ON when kernel
|
||||
* tries to touch the "partially handled" page.
|
||||
*/
|
||||
if (!PageAnon(head)) {
|
||||
pr_err("Memory failure: %#lx: non anonymous thp\n",
|
||||
page_to_pfn(page));
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (get_page_unless_zero(head)) {
|
||||
if (head == compound_head(page))
|
||||
return 1;
|
||||
@ -1708,6 +1694,20 @@ int memory_failure(unsigned long pfn, int flags)
|
||||
}
|
||||
|
||||
if (PageTransHuge(hpage)) {
|
||||
/*
|
||||
* The flag must be set after the refcount is bumped
|
||||
* otherwise it may race with THP split.
|
||||
* And the flag can't be set in get_hwpoison_page() since
|
||||
* it is called by soft offline too and it is just called
|
||||
* for !MF_COUNT_INCREASE. So here seems to be the best
|
||||
* place.
|
||||
*
|
||||
* Don't need care about the above error handling paths for
|
||||
* get_hwpoison_page() since they handle either free page
|
||||
* or unhandlable page. The refcount is bumped iff the
|
||||
* page is a valid handlable page.
|
||||
*/
|
||||
SetPageHasHWPoisoned(hpage);
|
||||
if (try_to_split_thp_page(p, "Memory Failure") < 0) {
|
||||
action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
|
||||
res = -EBUSY;
|
||||
|
@ -3906,6 +3906,15 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
|
||||
if (compound_order(page) != HPAGE_PMD_ORDER)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* Just backoff if any subpage of a THP is corrupted otherwise
|
||||
* the corrupted page may mapped by PMD silently to escape the
|
||||
* check. This kind of THP just can be PTE mapped. Access to
|
||||
* the corrupted subpage should trigger SIGBUS as expected.
|
||||
*/
|
||||
if (unlikely(PageHasHWPoisoned(page)))
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* Archs like ppc64 need additional space to store information
|
||||
* related to pte entry. Use the preallocated table for that.
|
||||
|
@ -856,16 +856,6 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (flags & MPOL_F_NUMA_BALANCING) {
|
||||
if (new && new->mode == MPOL_BIND) {
|
||||
new->flags |= (MPOL_F_MOF | MPOL_F_MORON);
|
||||
} else {
|
||||
ret = -EINVAL;
|
||||
mpol_put(new);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
ret = mpol_set_nodemask(new, nodes, scratch);
|
||||
if (ret) {
|
||||
mpol_put(new);
|
||||
@ -1458,7 +1448,11 @@ static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
|
||||
return -EINVAL;
|
||||
if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
|
||||
return -EINVAL;
|
||||
|
||||
if (*flags & MPOL_F_NUMA_BALANCING) {
|
||||
if (*mode != MPOL_BIND)
|
||||
return -EINVAL;
|
||||
*flags |= (MPOL_F_MOF | MPOL_F_MORON);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
mm/migrate.c | 62
@ -3066,7 +3066,7 @@ void migrate_vma_finalize(struct migrate_vma *migrate)
|
||||
EXPORT_SYMBOL(migrate_vma_finalize);
|
||||
#endif /* CONFIG_DEVICE_PRIVATE */
|
||||
|
||||
#if defined(CONFIG_MEMORY_HOTPLUG)
|
||||
#if defined(CONFIG_HOTPLUG_CPU)
|
||||
/* Disable reclaim-based migration. */
|
||||
static void __disable_all_migrate_targets(void)
|
||||
{
|
||||
@ -3208,25 +3208,6 @@ static void set_migration_target_nodes(void)
|
||||
put_online_mems();
|
||||
}
|
||||
|
||||
/*
|
||||
* React to hotplug events that might affect the migration targets
|
||||
* like events that online or offline NUMA nodes.
|
||||
*
|
||||
* The ordering is also currently dependent on which nodes have
|
||||
* CPUs. That means we need CPU on/offline notification too.
|
||||
*/
|
||||
static int migration_online_cpu(unsigned int cpu)
|
||||
{
|
||||
set_migration_target_nodes();
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int migration_offline_cpu(unsigned int cpu)
|
||||
{
|
||||
set_migration_target_nodes();
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* This leaves migrate-on-reclaim transiently disabled between
|
||||
* the MEM_GOING_OFFLINE and MEM_OFFLINE events. This runs
|
||||
@ -3239,8 +3220,18 @@ static int migration_offline_cpu(unsigned int cpu)
|
||||
* set_migration_target_nodes().
|
||||
*/
|
||||
static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
|
||||
unsigned long action, void *arg)
|
||||
unsigned long action, void *_arg)
|
||||
{
|
||||
struct memory_notify *arg = _arg;
|
||||
|
||||
/*
|
||||
* Only update the node migration order when a node is
|
||||
* changing status, like online->offline. This avoids
|
||||
* the overhead of synchronize_rcu() in most cases.
|
||||
*/
|
||||
if (arg->status_change_nid < 0)
|
||||
return notifier_from_errno(0);
|
||||
|
||||
switch (action) {
|
||||
case MEM_GOING_OFFLINE:
|
||||
/*
|
||||
@ -3274,13 +3265,31 @@ static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
|
||||
return notifier_from_errno(0);
|
||||
}
|
||||
|
||||
/*
|
||||
* React to hotplug events that might affect the migration targets
|
||||
* like events that online or offline NUMA nodes.
|
||||
*
|
||||
* The ordering is also currently dependent on which nodes have
|
||||
* CPUs. That means we need CPU on/offline notification too.
|
||||
*/
|
||||
static int migration_online_cpu(unsigned int cpu)
|
||||
{
|
||||
set_migration_target_nodes();
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int migration_offline_cpu(unsigned int cpu)
|
||||
{
|
||||
set_migration_target_nodes();
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init migrate_on_reclaim_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "migrate on reclaim",
|
||||
migration_online_cpu,
|
||||
migration_offline_cpu);
|
||||
ret = cpuhp_setup_state_nocalls(CPUHP_MM_DEMOTION_DEAD, "mm/demotion:offline",
|
||||
NULL, migration_offline_cpu);
|
||||
/*
|
||||
* In the unlikely case that this fails, the automatic
|
||||
* migration targets may become suboptimal for nodes
|
||||
@ -3288,9 +3297,12 @@ static int __init migrate_on_reclaim_init(void)
|
||||
* rare case, do not bother trying to do anything special.
|
||||
*/
|
||||
WARN_ON(ret < 0);
|
||||
ret = cpuhp_setup_state(CPUHP_AP_MM_DEMOTION_ONLINE, "mm/demotion:online",
|
||||
migration_online_cpu, NULL);
|
||||
WARN_ON(ret < 0);
|
||||
|
||||
hotplug_memory_notifier(migrate_on_reclaim_callback, 100);
|
||||
return 0;
|
||||
}
|
||||
late_initcall(migrate_on_reclaim_init);
|
||||
#endif /* CONFIG_MEMORY_HOTPLUG */
|
||||
#endif /* CONFIG_HOTPLUG_CPU */
|
||||
|
@ -1150,7 +1150,7 @@ SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
|
||||
struct task_struct *task;
|
||||
struct task_struct *p;
|
||||
unsigned int f_flags;
|
||||
bool reap = true;
|
||||
bool reap = false;
|
||||
struct pid *pid;
|
||||
long ret = 0;
|
||||
|
||||
@ -1177,15 +1177,15 @@ SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
|
||||
goto put_task;
|
||||
}
|
||||
|
||||
mm = p->mm;
|
||||
mmgrab(mm);
|
||||
|
||||
/* If the work has been done already, just exit with success */
|
||||
if (test_bit(MMF_OOM_SKIP, &mm->flags))
|
||||
reap = false;
|
||||
else if (!task_will_free_mem(p)) {
|
||||
reap = false;
|
||||
ret = -EINVAL;
|
||||
if (mmget_not_zero(p->mm)) {
|
||||
mm = p->mm;
|
||||
if (task_will_free_mem(p))
|
||||
reap = true;
|
||||
else {
|
||||
/* Error only if the work has not been done already */
|
||||
if (!test_bit(MMF_OOM_SKIP, &mm->flags))
|
||||
ret = -EINVAL;
|
||||
}
|
||||
}
|
||||
task_unlock(p);
|
||||
|
||||
@ -1201,7 +1201,8 @@ SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
|
||||
mmap_read_unlock(mm);
|
||||
|
||||
drop_mm:
|
||||
mmdrop(mm);
|
||||
if (mm)
|
||||
mmput(mm);
|
||||
put_task:
|
||||
put_task_struct(task);
|
||||
put_pid:
|
||||
|
@ -1312,8 +1312,10 @@ static __always_inline bool free_pages_prepare(struct page *page,
|
||||
|
||||
VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
|
||||
|
||||
if (compound)
|
||||
if (compound) {
|
||||
ClearPageDoubleMap(page);
|
||||
ClearPageHasHWPoisoned(page);
|
||||
}
|
||||
for (i = 1; i < (1 << order); i++) {
|
||||
if (compound)
|
||||
bad += free_tail_pages_check(page, page + i);
|
||||
@ -5223,6 +5225,10 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
|
||||
if (unlikely(page_array && nr_pages - nr_populated == 0))
|
||||
goto out;
|
||||
|
||||
/* Bulk allocator does not support memcg accounting. */
|
||||
if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT))
|
||||
goto failed;
|
||||
|
||||
/* Use the single page allocator for one page. */
|
||||
if (nr_pages - nr_populated == 1)
|
||||
goto failed;
|
||||
|
@ -269,7 +269,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
|
||||
total_usage += table_size;
|
||||
return 0;
|
||||
}
|
||||
#ifdef CONFIG_MEMORY_HOTPLUG
|
||||
|
||||
static void free_page_ext(void *addr)
|
||||
{
|
||||
if (is_vmalloc_addr(addr)) {
|
||||
@ -374,8 +374,6 @@ static int __meminit page_ext_callback(struct notifier_block *self,
|
||||
return notifier_from_errno(ret);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
void __init page_ext_init(void)
|
||||
{
|
||||
unsigned long pfn;
|
||||
|
@ -18,7 +18,6 @@
|
||||
#include <linux/secretmem.h>
|
||||
#include <linux/set_memory.h>
|
||||
#include <linux/sched/signal.h>
|
||||
#include <linux/refcount.h>
|
||||
|
||||
#include <uapi/linux/magic.h>
|
||||
|
||||
@ -41,11 +40,11 @@ module_param_named(enable, secretmem_enable, bool, 0400);
|
||||
MODULE_PARM_DESC(secretmem_enable,
|
||||
"Enable secretmem and memfd_secret(2) system call");
|
||||
|
||||
static refcount_t secretmem_users;
|
||||
static atomic_t secretmem_users;
|
||||
|
||||
bool secretmem_active(void)
|
||||
{
|
||||
return !!refcount_read(&secretmem_users);
|
||||
return !!atomic_read(&secretmem_users);
|
||||
}
|
||||
|
||||
static vm_fault_t secretmem_fault(struct vm_fault *vmf)
|
||||
@ -104,7 +103,7 @@ static const struct vm_operations_struct secretmem_vm_ops = {
|
||||
|
||||
static int secretmem_release(struct inode *inode, struct file *file)
|
||||
{
|
||||
refcount_dec(&secretmem_users);
|
||||
atomic_dec(&secretmem_users);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -204,6 +203,8 @@ SYSCALL_DEFINE1(memfd_secret, unsigned int, flags)
|
||||
|
||||
if (flags & ~(SECRETMEM_FLAGS_MASK | O_CLOEXEC))
|
||||
return -EINVAL;
|
||||
if (atomic_read(&secretmem_users) < 0)
|
||||
return -ENFILE;
|
||||
|
||||
fd = get_unused_fd_flags(flags & O_CLOEXEC);
|
||||
if (fd < 0)
|
||||
@ -217,8 +218,8 @@ SYSCALL_DEFINE1(memfd_secret, unsigned int, flags)
|
||||
|
||||
file->f_flags |= O_LARGEFILE;
|
||||
|
||||
atomic_inc(&secretmem_users);
|
||||
fd_install(fd, file);
|
||||
refcount_inc(&secretmem_users);
|
||||
return fd;
|
||||
|
||||
err_put_fd:
|
||||
|
@ -1095,7 +1095,7 @@ static int slab_offline_cpu(unsigned int cpu)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
|
||||
#if defined(CONFIG_NUMA)
|
||||
/*
|
||||
* Drains freelist for a node on each slab cache, used for memory hot-remove.
|
||||
* Returns -EBUSY if all objects cannot be drained so that the node is not
|
||||
@ -1157,7 +1157,7 @@ static int __meminit slab_memory_callback(struct notifier_block *self,
|
||||
out:
|
||||
return notifier_from_errno(ret);
|
||||
}
|
||||
#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
|
||||
#endif /* CONFIG_NUMA */
|
||||
|
||||
/*
|
||||
* swap the static kmem_cache_node with kmalloced memory
|
||||
|
mm/slub.c | 33
@ -1701,7 +1701,8 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s,
|
||||
}
|
||||
|
||||
static inline bool slab_free_freelist_hook(struct kmem_cache *s,
|
||||
void **head, void **tail)
|
||||
void **head, void **tail,
|
||||
int *cnt)
|
||||
{
|
||||
|
||||
void *object;
|
||||
@ -1728,6 +1729,12 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
|
||||
*head = object;
|
||||
if (!*tail)
|
||||
*tail = object;
|
||||
} else {
|
||||
/*
|
||||
* Adjust the reconstructed freelist depth
|
||||
* accordingly if object's reuse is delayed.
|
||||
*/
|
||||
--(*cnt);
|
||||
}
|
||||
} while (object != old_tail);
|
||||
|
||||
@ -3413,7 +3420,9 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
|
||||
struct kmem_cache_cpu *c;
|
||||
unsigned long tid;
|
||||
|
||||
memcg_slab_free_hook(s, &head, 1);
|
||||
/* memcg_slab_free_hook() is already called for bulk free. */
|
||||
if (!tail)
|
||||
memcg_slab_free_hook(s, &head, 1);
|
||||
redo:
|
||||
/*
|
||||
* Determine the currently cpus per cpu slab.
|
||||
@ -3480,7 +3489,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
|
||||
* With KASAN enabled slab_free_freelist_hook modifies the freelist
|
||||
* to remove objects, whose reuse must be delayed.
|
||||
*/
|
||||
if (slab_free_freelist_hook(s, &head, &tail))
|
||||
if (slab_free_freelist_hook(s, &head, &tail, &cnt))
|
||||
do_slab_free(s, page, head, tail, cnt, addr);
|
||||
}
|
||||
|
||||
@ -4203,8 +4212,8 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
|
||||
if (alloc_kmem_cache_cpus(s))
|
||||
return 0;
|
||||
|
||||
free_kmem_cache_nodes(s);
|
||||
error:
|
||||
__kmem_cache_release(s);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -4880,13 +4889,15 @@ int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
|
||||
return 0;
|
||||
|
||||
err = sysfs_slab_add(s);
|
||||
if (err)
|
||||
if (err) {
|
||||
__kmem_cache_release(s);
|
||||
return err;
|
||||
}
|
||||
|
||||
if (s->flags & SLAB_STORE_USER)
|
||||
debugfs_slab_add(s);
|
||||
|
||||
return err;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
|
||||
@ -6108,10 +6119,15 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep)
|
||||
struct kmem_cache *s = file_inode(filep)->i_private;
|
||||
unsigned long *obj_map;
|
||||
|
||||
obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
|
||||
if (!obj_map)
|
||||
if (!t)
|
||||
return -ENOMEM;
|
||||
|
||||
obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
|
||||
if (!obj_map) {
|
||||
seq_release_private(inode, filep);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
|
||||
alloc = TRACK_ALLOC;
|
||||
else
|
||||
@ -6119,6 +6135,7 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep)
|
||||
|
||||
if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
|
||||
bitmap_free(obj_map);
|
||||
seq_release_private(inode, filep);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
|
mm/vmalloc.c | 15
@ -2816,6 +2816,8 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
|
||||
unsigned int order, unsigned int nr_pages, struct page **pages)
|
||||
{
|
||||
unsigned int nr_allocated = 0;
|
||||
struct page *page;
|
||||
int i;
|
||||
|
||||
/*
|
||||
* For order-0 pages we make use of bulk allocator, if
|
||||
@ -2823,7 +2825,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
|
||||
* to fails, fallback to a single page allocator that is
|
||||
* more permissive.
|
||||
*/
|
||||
if (!order) {
|
||||
if (!order && nid != NUMA_NO_NODE) {
|
||||
while (nr_allocated < nr_pages) {
|
||||
unsigned int nr, nr_pages_request;
|
||||
|
||||
@ -2848,7 +2850,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
|
||||
if (nr != nr_pages_request)
|
||||
break;
|
||||
}
|
||||
} else
|
||||
} else if (order)
|
||||
/*
|
||||
* Compound pages required for remap_vmalloc_page if
|
||||
* high-order pages.
|
||||
@ -2856,11 +2858,12 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
|
||||
gfp |= __GFP_COMP;
|
||||
|
||||
/* High-order pages or fallback path if "bulk" fails. */
|
||||
while (nr_allocated < nr_pages) {
|
||||
struct page *page;
|
||||
int i;
|
||||
|
||||
page = alloc_pages_node(nid, gfp, order);
|
||||
while (nr_allocated < nr_pages) {
|
||||
if (nid == NUMA_NO_NODE)
|
||||
page = alloc_pages(gfp, order);
|
||||
else
|
||||
page = alloc_pages_node(nid, gfp, order);
|
||||
if (unlikely(!page))
|
||||
break;
|
||||
|
||||
|
@@ -1560,11 +1560,15 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
return 0;

bat_priv->bla.claim_hash = batadv_hash_new(128);
bat_priv->bla.backbone_hash = batadv_hash_new(32);

if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
if (!bat_priv->bla.claim_hash)
return -ENOMEM;

bat_priv->bla.backbone_hash = batadv_hash_new(32);
if (!bat_priv->bla.backbone_hash) {
batadv_hash_destroy(bat_priv->bla.claim_hash);
return -ENOMEM;
}

batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
&batadv_claim_hash_lock_class_key);
batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
@@ -190,29 +190,41 @@ int batadv_mesh_init(struct net_device *soft_iface)

bat_priv->gw.generation = 0;

ret = batadv_v_mesh_init(bat_priv);
if (ret < 0)
goto err;

ret = batadv_originator_init(bat_priv);
if (ret < 0)
goto err;
if (ret < 0) {
atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
goto err_orig;
}

ret = batadv_tt_init(bat_priv);
if (ret < 0)
goto err;
if (ret < 0) {
atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
goto err_tt;
}

ret = batadv_v_mesh_init(bat_priv);
if (ret < 0) {
atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
goto err_v;
}

ret = batadv_bla_init(bat_priv);
if (ret < 0)
goto err;
if (ret < 0) {
atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
goto err_bla;
}

ret = batadv_dat_init(bat_priv);
if (ret < 0)
goto err;
if (ret < 0) {
atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
goto err_dat;
}

ret = batadv_nc_mesh_init(bat_priv);
if (ret < 0)
goto err;
if (ret < 0) {
atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
goto err_nc;
}

batadv_gw_init(bat_priv);
batadv_mcast_init(bat_priv);
@@ -222,8 +234,20 @@ int batadv_mesh_init(struct net_device *soft_iface)

return 0;

err:
batadv_mesh_free(soft_iface);
err_nc:
batadv_dat_free(bat_priv);
err_dat:
batadv_bla_free(bat_priv);
err_bla:
batadv_v_mesh_free(bat_priv);
err_v:
batadv_tt_free(bat_priv);
err_tt:
batadv_originator_free(bat_priv);
err_orig:
batadv_purge_outstanding_packets(bat_priv, NULL);
atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);

return ret;
}
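The rewritten batadv_mesh_init() drops the single catch-all err: label (which called batadv_mesh_free() no matter how far initialisation got) in favour of one label per subsystem, unwound in reverse order. A generic sketch of that goto-unwind ladder, with hypothetical setup_*/teardown_* helpers standing in for the batman-adv calls:

struct ctx;
int setup_a(struct ctx *c), setup_b(struct ctx *c), setup_c(struct ctx *c);
void teardown_a(struct ctx *c), teardown_b(struct ctx *c);

/* Sketch only: each label undoes exactly the steps that already
 * succeeded, so a failure in setup_c() never tears down state that
 * setup_c() itself did not create.
 */
int init_all(struct ctx *c)
{
	int ret;

	ret = setup_a(c);
	if (ret < 0)
		return ret;

	ret = setup_b(c);
	if (ret < 0)
		goto err_a;

	ret = setup_c(c);
	if (ret < 0)
		goto err_b;

	return 0;

err_b:
	teardown_b(c);
err_a:
	teardown_a(c);
	return ret;
}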
@@ -152,8 +152,10 @@ int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
&batadv_nc_coding_hash_lock_class_key);

bat_priv->nc.decoding_hash = batadv_hash_new(128);
if (!bat_priv->nc.decoding_hash)
if (!bat_priv->nc.decoding_hash) {
batadv_hash_destroy(bat_priv->nc.coding_hash);
goto err;
}

batadv_hash_set_lock_class(bat_priv->nc.decoding_hash,
&batadv_nc_decoding_hash_lock_class_key);
@@ -4162,8 +4162,10 @@ int batadv_tt_init(struct batadv_priv *bat_priv)
return ret;

ret = batadv_tt_global_init(bat_priv);
if (ret < 0)
if (ret < 0) {
batadv_tt_local_table_free(bat_priv);
return ret;
}

batadv_tvlv_handler_register(bat_priv, batadv_tt_tvlv_ogm_handler_v1,
batadv_tt_tvlv_unicast_handler_v1,
@@ -1125,9 +1125,7 @@ static inline unsigned long br_multicast_lmqt(const struct net_bridge_mcast *brm

static inline unsigned long br_multicast_gmi(const struct net_bridge_mcast *brmctx)
{
/* use the RFC default of 2 for QRV */
return 2 * brmctx->multicast_query_interval +
brmctx->multicast_query_response_interval;
return brmctx->multicast_membership_interval;
}

static inline bool
@@ -926,7 +926,9 @@ static int translate_table(struct net *net, const char *name,
return -ENOMEM;
for_each_possible_cpu(i) {
newinfo->chainstack[i] =
vmalloc(array_size(udc_cnt, sizeof(*(newinfo->chainstack[0]))));
vmalloc_node(array_size(udc_cnt,
sizeof(*(newinfo->chainstack[0]))),
cpu_to_node(i));
if (!newinfo->chainstack[i]) {
while (i)
vfree(newinfo->chainstack[--i]);
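The ebtables change swaps plain vmalloc() for vmalloc_node() so each CPU's chainstack is backed by memory on that CPU's own NUMA node. A hedged, self-contained sketch of the per-CPU, node-local allocation pattern (not the ebtables code itself):

#include <linux/vmalloc.h>
#include <linux/overflow.h>
#include <linux/cpumask.h>
#include <linux/topology.h>

/* Sketch: one buffer per possible CPU, allocated on that CPU's node. */
static void **alloc_percpu_stacks(size_t nr_entries, size_t entry_size)
{
	void **stacks;
	unsigned int cpu;

	stacks = vzalloc(array_size(nr_cpu_ids, sizeof(*stacks)));
	if (!stacks)
		return NULL;

	for_each_possible_cpu(cpu) {
		stacks[cpu] = vmalloc_node(array_size(nr_entries, entry_size),
					   cpu_to_node(cpu));
		if (!stacks[cpu])
			goto err;
	}
	return stacks;

err:
	for_each_possible_cpu(cpu)
		vfree(stacks[cpu]);	/* vfree(NULL) is a no-op */
	vfree(stacks);
	return NULL;
}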
@ -121,7 +121,7 @@ enum {
|
||||
struct tpcon {
|
||||
int idx;
|
||||
int len;
|
||||
u8 state;
|
||||
u32 state;
|
||||
u8 bs;
|
||||
u8 sn;
|
||||
u8 ll_dl;
|
||||
@ -848,6 +848,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
|
||||
{
|
||||
struct sock *sk = sock->sk;
|
||||
struct isotp_sock *so = isotp_sk(sk);
|
||||
u32 old_state = so->tx.state;
|
||||
struct sk_buff *skb;
|
||||
struct net_device *dev;
|
||||
struct canfd_frame *cf;
|
||||
@ -860,45 +861,55 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
|
||||
return -EADDRNOTAVAIL;
|
||||
|
||||
/* we do not support multiple buffers - for now */
|
||||
if (so->tx.state != ISOTP_IDLE || wq_has_sleeper(&so->wait)) {
|
||||
if (msg->msg_flags & MSG_DONTWAIT)
|
||||
return -EAGAIN;
|
||||
if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE ||
|
||||
wq_has_sleeper(&so->wait)) {
|
||||
if (msg->msg_flags & MSG_DONTWAIT) {
|
||||
err = -EAGAIN;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
/* wait for complete transmission of current pdu */
|
||||
wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
|
||||
err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
|
||||
if (err)
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
if (!size || size > MAX_MSG_LENGTH)
|
||||
return -EINVAL;
|
||||
if (!size || size > MAX_MSG_LENGTH) {
|
||||
err = -EINVAL;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
/* take care of a potential SF_DL ESC offset for TX_DL > 8 */
|
||||
off = (so->tx.ll_dl > CAN_MAX_DLEN) ? 1 : 0;
|
||||
|
||||
/* does the given data fit into a single frame for SF_BROADCAST? */
|
||||
if ((so->opt.flags & CAN_ISOTP_SF_BROADCAST) &&
|
||||
(size > so->tx.ll_dl - SF_PCI_SZ4 - ae - off))
|
||||
return -EINVAL;
|
||||
(size > so->tx.ll_dl - SF_PCI_SZ4 - ae - off)) {
|
||||
err = -EINVAL;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
err = memcpy_from_msg(so->tx.buf, msg, size);
|
||||
if (err < 0)
|
||||
return err;
|
||||
goto err_out;
|
||||
|
||||
dev = dev_get_by_index(sock_net(sk), so->ifindex);
|
||||
if (!dev)
|
||||
return -ENXIO;
|
||||
if (!dev) {
|
||||
err = -ENXIO;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
skb = sock_alloc_send_skb(sk, so->ll.mtu + sizeof(struct can_skb_priv),
|
||||
msg->msg_flags & MSG_DONTWAIT, &err);
|
||||
if (!skb) {
|
||||
dev_put(dev);
|
||||
return err;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
can_skb_reserve(skb);
|
||||
can_skb_prv(skb)->ifindex = dev->ifindex;
|
||||
can_skb_prv(skb)->skbcnt = 0;
|
||||
|
||||
so->tx.state = ISOTP_SENDING;
|
||||
so->tx.len = size;
|
||||
so->tx.idx = 0;
|
||||
|
||||
@ -954,15 +965,25 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
|
||||
if (err) {
|
||||
pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
|
||||
__func__, ERR_PTR(err));
|
||||
return err;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
if (wait_tx_done) {
|
||||
/* wait for complete transmission of current pdu */
|
||||
wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
|
||||
|
||||
if (sk->sk_err)
|
||||
return -sk->sk_err;
|
||||
}
|
||||
|
||||
return size;
|
||||
|
||||
err_out:
|
||||
so->tx.state = old_state;
|
||||
if (so->tx.state == ISOTP_IDLE)
|
||||
wake_up_interruptible(&so->wait);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
|
||||
|
@@ -330,6 +330,7 @@ int j1939_session_activate(struct j1939_session *session);
void j1939_tp_schedule_txtimer(struct j1939_session *session, int msec);
void j1939_session_timers_cancel(struct j1939_session *session);

#define J1939_MIN_TP_PACKET_SIZE 9
#define J1939_MAX_TP_PACKET_SIZE (7 * 0xff)
#define J1939_MAX_ETP_PACKET_SIZE (7 * 0x00ffffff)
@@ -249,11 +249,14 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
struct j1939_priv *priv, *priv_new;
int ret;

priv = j1939_priv_get_by_ndev(ndev);
spin_lock(&j1939_netdev_lock);
priv = j1939_priv_get_by_ndev_locked(ndev);
if (priv) {
kref_get(&priv->rx_kref);
spin_unlock(&j1939_netdev_lock);
return priv;
}
spin_unlock(&j1939_netdev_lock);

priv = j1939_priv_create(ndev);
if (!priv)
@@ -269,10 +272,10 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
/* Someone was faster than us, use their priv and roll
* back our's.
*/
kref_get(&priv_new->rx_kref);
spin_unlock(&j1939_netdev_lock);
dev_put(ndev);
kfree(priv);
kref_get(&priv_new->rx_kref);
return priv_new;
}
j1939_priv_set(ndev, priv);
@ -1237,12 +1237,11 @@ static enum hrtimer_restart j1939_tp_rxtimer(struct hrtimer *hrtimer)
|
||||
session->err = -ETIME;
|
||||
j1939_session_deactivate(session);
|
||||
} else {
|
||||
netdev_alert(priv->ndev, "%s: 0x%p: rx timeout, send abort\n",
|
||||
__func__, session);
|
||||
|
||||
j1939_session_list_lock(session->priv);
|
||||
if (session->state >= J1939_SESSION_ACTIVE &&
|
||||
session->state < J1939_SESSION_ACTIVE_MAX) {
|
||||
netdev_alert(priv->ndev, "%s: 0x%p: rx timeout, send abort\n",
|
||||
__func__, session);
|
||||
j1939_session_get(session);
|
||||
hrtimer_start(&session->rxtimer,
|
||||
ms_to_ktime(J1939_XTP_ABORT_TIMEOUT_MS),
|
||||
@ -1609,6 +1608,8 @@ j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
|
||||
abort = J1939_XTP_ABORT_FAULT;
|
||||
else if (len > priv->tp_max_packet_size)
|
||||
abort = J1939_XTP_ABORT_RESOURCE;
|
||||
else if (len < J1939_MIN_TP_PACKET_SIZE)
|
||||
abort = J1939_XTP_ABORT_FAULT;
|
||||
}
|
||||
|
||||
if (abort != J1939_XTP_NO_ABORT) {
|
||||
@ -1789,6 +1790,7 @@ static void j1939_xtp_rx_dpo(struct j1939_priv *priv, struct sk_buff *skb,
|
||||
static void j1939_xtp_rx_dat_one(struct j1939_session *session,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
enum j1939_xtp_abort abort = J1939_XTP_ABORT_FAULT;
|
||||
struct j1939_priv *priv = session->priv;
|
||||
struct j1939_sk_buff_cb *skcb, *se_skcb;
|
||||
struct sk_buff *se_skb = NULL;
|
||||
@ -1803,9 +1805,11 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
|
||||
|
||||
skcb = j1939_skb_to_cb(skb);
|
||||
dat = skb->data;
|
||||
if (skb->len <= 1)
|
||||
if (skb->len != 8) {
|
||||
/* makes no sense */
|
||||
abort = J1939_XTP_ABORT_UNEXPECTED_DATA;
|
||||
goto out_session_cancel;
|
||||
}
|
||||
|
||||
switch (session->last_cmd) {
|
||||
case 0xff:
|
||||
@ -1904,7 +1908,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
|
||||
out_session_cancel:
|
||||
kfree_skb(se_skb);
|
||||
j1939_session_timers_cancel(session);
|
||||
j1939_session_cancel(session, J1939_XTP_ABORT_FAULT);
|
||||
j1939_session_cancel(session, abort);
|
||||
j1939_session_put(session);
|
||||
}
|
||||
|
||||
|
@@ -3163,6 +3163,12 @@ static u16 skb_tx_hash(const struct net_device *dev,

qoffset = sb_dev->tc_to_txq[tc].offset;
qcount = sb_dev->tc_to_txq[tc].count;
if (unlikely(!qcount)) {
net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
sb_dev->name, qoffset, tc);
qoffset = 0;
qcount = dev->real_num_tx_queues;
}
}

if (skb_rx_queue_recorded(skb)) {
@@ -3906,7 +3912,8 @@ int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
skb_reset_mac_header(skb);
__skb_pull(skb, skb_network_offset(skb));
skb->pkt_type = PACKET_LOOPBACK;
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (skb->ip_summed == CHECKSUM_NONE)
skb->ip_summed = CHECKSUM_UNNECESSARY;
WARN_ON(!skb_dst(skb));
skb_dst_force(skb);
netif_rx_ni(skb);
@@ -1973,9 +1973,9 @@ int netdev_register_kobject(struct net_device *ndev)
int netdev_change_owner(struct net_device *ndev, const struct net *net_old,
const struct net *net_new)
{
kuid_t old_uid = GLOBAL_ROOT_UID, new_uid = GLOBAL_ROOT_UID;
kgid_t old_gid = GLOBAL_ROOT_GID, new_gid = GLOBAL_ROOT_GID;
struct device *dev = &ndev->dev;
kuid_t old_uid, new_uid;
kgid_t old_gid, new_gid;
int error;

net_ns_get_ownership(net_old, &old_uid, &old_gid);
@@ -80,6 +80,7 @@
#include <linux/indirect_call_wrapper.h>

#include "datagram.h"
#include "sock_destructor.h"

struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
@@ -1804,30 +1805,39 @@ EXPORT_SYMBOL(skb_realloc_headroom);
struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom)
{
int delta = headroom - skb_headroom(skb);
int osize = skb_end_offset(skb);
struct sock *sk = skb->sk;

if (WARN_ONCE(delta <= 0,
"%s is expecting an increase in the headroom", __func__))
return skb;

/* pskb_expand_head() might crash, if skb is shared */
if (skb_shared(skb)) {
delta = SKB_DATA_ALIGN(delta);
/* pskb_expand_head() might crash, if skb is shared. */
if (skb_shared(skb) || !is_skb_wmem(skb)) {
struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

if (likely(nskb)) {
if (skb->sk)
skb_set_owner_w(nskb, skb->sk);
consume_skb(skb);
} else {
kfree_skb(skb);
}
if (unlikely(!nskb))
goto fail;

if (sk)
skb_set_owner_w(nskb, sk);
consume_skb(skb);
skb = nskb;
}
if (skb &&
pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
kfree_skb(skb);
skb = NULL;
if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC))
goto fail;

if (sk && is_skb_wmem(skb)) {
delta = skb_end_offset(skb) - osize;
refcount_add(delta, &sk->sk_wmem_alloc);
skb->truesize += delta;
}
return skb;

fail:
kfree_skb(skb);
return NULL;
}
EXPORT_SYMBOL(skb_expand_head);
@@ -474,6 +474,20 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);

bool sk_msg_is_readable(struct sock *sk)
{
struct sk_psock *psock;
bool empty = true;

rcu_read_lock();
psock = sk_psock(sk);
if (likely(psock))
empty = list_empty(&psock->ingress_msg);
rcu_read_unlock();
return !empty;
}
EXPORT_SYMBOL_GPL(sk_msg_is_readable);

static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
struct sk_buff *skb)
{
net/core/sock_destructor.h (new file)
@@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _NET_CORE_SOCK_DESTRUCTOR_H
#define _NET_CORE_SOCK_DESTRUCTOR_H
#include <net/tcp.h>

static inline bool is_skb_wmem(const struct sk_buff *skb)
{
return skb->destructor == sock_wfree ||
skb->destructor == __sock_wfree ||
(IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree);
}
#endif
@@ -419,7 +419,7 @@ static struct ctl_table net_core_table[] = {
.mode = 0600,
.proc_handler = proc_dolongvec_minmax_bpf_restricted,
.extra1 = &long_one,
.extra2 = &long_max,
.extra2 = &bpf_jit_limit_max,
},
#endif
{
@@ -1374,12 +1374,15 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds,

for_each_available_child_of_node(ports, port) {
err = of_property_read_u32(port, "reg", &reg);
if (err)
if (err) {
of_node_put(port);
goto out_put_node;
}

if (reg >= ds->num_ports) {
dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%zu)\n",
port, reg, ds->num_ports);
of_node_put(port);
err = -EINVAL;
goto out_put_node;
}
@@ -1387,8 +1390,10 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
dp = dsa_to_port(ds, reg);

err = dsa_port_parse_of(dp, port);
if (err)
if (err) {
of_node_put(port);
goto out_put_node;
}
}

out_put_node:
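The dsa_switch_parse_ports_of() fix adds of_node_put(port) on each early exit: for_each_available_child_of_node() holds a reference on the child it hands to the loop body and only drops it when advancing to the next child, so leaving the loop early leaks that reference unless the caller puts it. A minimal hedged sketch of the same pattern, with a hypothetical parse_children() helper:

#include <linux/of.h>

/* Sketch: stop at the first child whose "reg" property is unreadable,
 * dropping the reference the iterator still holds on that child.
 */
static int parse_children(const struct device_node *parent)
{
	struct device_node *child;
	u32 reg;
	int err;

	for_each_available_child_of_node(parent, child) {
		err = of_property_read_u32(child, "reg", &reg);
		if (err) {
			of_node_put(child);
			return err;
		}
	}
	return 0;
}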
@@ -486,10 +486,7 @@ static bool tcp_stream_is_readable(struct sock *sk, int target)
{
if (tcp_epollin_ready(sk, target))
return true;

if (sk->sk_prot->stream_memory_read)
return sk->sk_prot->stream_memory_read(sk);
return false;
return sk_is_readable(sk);
}

/*
@ -150,19 +150,6 @@ int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg,
|
||||
EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir);
|
||||
|
||||
#ifdef CONFIG_BPF_SYSCALL
|
||||
static bool tcp_bpf_stream_read(const struct sock *sk)
|
||||
{
|
||||
struct sk_psock *psock;
|
||||
bool empty = true;
|
||||
|
||||
rcu_read_lock();
|
||||
psock = sk_psock(sk);
|
||||
if (likely(psock))
|
||||
empty = list_empty(&psock->ingress_msg);
|
||||
rcu_read_unlock();
|
||||
return !empty;
|
||||
}
|
||||
|
||||
static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
|
||||
long timeo)
|
||||
{
|
||||
@ -232,6 +219,7 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
|
||||
bool cork = false, enospc = sk_msg_full(msg);
|
||||
struct sock *sk_redir;
|
||||
u32 tosend, delta = 0;
|
||||
u32 eval = __SK_NONE;
|
||||
int ret;
|
||||
|
||||
more_data:
|
||||
@ -275,13 +263,24 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
|
||||
case __SK_REDIRECT:
|
||||
sk_redir = psock->sk_redir;
|
||||
sk_msg_apply_bytes(psock, tosend);
|
||||
if (!psock->apply_bytes) {
|
||||
/* Clean up before releasing the sock lock. */
|
||||
eval = psock->eval;
|
||||
psock->eval = __SK_NONE;
|
||||
psock->sk_redir = NULL;
|
||||
}
|
||||
if (psock->cork) {
|
||||
cork = true;
|
||||
psock->cork = NULL;
|
||||
}
|
||||
sk_msg_return(sk, msg, tosend);
|
||||
release_sock(sk);
|
||||
|
||||
ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags);
|
||||
|
||||
if (eval == __SK_REDIRECT)
|
||||
sock_put(sk_redir);
|
||||
|
||||
lock_sock(sk);
|
||||
if (unlikely(ret < 0)) {
|
||||
int free = sk_msg_free_nocharge(sk, msg);
|
||||
@ -479,7 +478,7 @@ static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
|
||||
prot[TCP_BPF_BASE].unhash = sock_map_unhash;
|
||||
prot[TCP_BPF_BASE].close = sock_map_close;
|
||||
prot[TCP_BPF_BASE].recvmsg = tcp_bpf_recvmsg;
|
||||
prot[TCP_BPF_BASE].stream_memory_read = tcp_bpf_stream_read;
|
||||
prot[TCP_BPF_BASE].sock_is_readable = sk_msg_is_readable;
|
||||
|
||||
prot[TCP_BPF_TX] = prot[TCP_BPF_BASE];
|
||||
prot[TCP_BPF_TX].sendmsg = tcp_bpf_sendmsg;
|
||||
|
@ -1037,6 +1037,20 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
|
||||
DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
|
||||
EXPORT_SYMBOL(tcp_md5_needed);
|
||||
|
||||
static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new)
|
||||
{
|
||||
if (!old)
|
||||
return true;
|
||||
|
||||
/* l3index always overrides non-l3index */
|
||||
if (old->l3index && new->l3index == 0)
|
||||
return false;
|
||||
if (old->l3index == 0 && new->l3index)
|
||||
return true;
|
||||
|
||||
return old->prefixlen < new->prefixlen;
|
||||
}
|
||||
|
||||
/* Find the Key structure for an address. */
|
||||
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
|
||||
const union tcp_md5_addr *addr,
|
||||
@ -1059,7 +1073,7 @@ struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
|
||||
lockdep_sock_is_held(sk)) {
|
||||
if (key->family != family)
|
||||
continue;
|
||||
if (key->l3index && key->l3index != l3index)
|
||||
if (key->flags & TCP_MD5SIG_FLAG_IFINDEX && key->l3index != l3index)
|
||||
continue;
|
||||
if (family == AF_INET) {
|
||||
mask = inet_make_mask(key->prefixlen);
|
||||
@ -1074,8 +1088,7 @@ struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
|
||||
match = false;
|
||||
}
|
||||
|
||||
if (match && (!best_match ||
|
||||
key->prefixlen > best_match->prefixlen))
|
||||
if (match && better_md5_match(best_match, key))
|
||||
best_match = key;
|
||||
}
|
||||
return best_match;
|
||||
@ -1085,7 +1098,7 @@ EXPORT_SYMBOL(__tcp_md5_do_lookup);
|
||||
static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
|
||||
const union tcp_md5_addr *addr,
|
||||
int family, u8 prefixlen,
|
||||
int l3index)
|
||||
int l3index, u8 flags)
|
||||
{
|
||||
const struct tcp_sock *tp = tcp_sk(sk);
|
||||
struct tcp_md5sig_key *key;
|
||||
@ -1105,7 +1118,9 @@ static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
|
||||
lockdep_sock_is_held(sk)) {
|
||||
if (key->family != family)
|
||||
continue;
|
||||
if (key->l3index && key->l3index != l3index)
|
||||
if ((key->flags & TCP_MD5SIG_FLAG_IFINDEX) != (flags & TCP_MD5SIG_FLAG_IFINDEX))
|
||||
continue;
|
||||
if (key->l3index != l3index)
|
||||
continue;
|
||||
if (!memcmp(&key->addr, addr, size) &&
|
||||
key->prefixlen == prefixlen)
|
||||
@ -1129,7 +1144,7 @@ EXPORT_SYMBOL(tcp_v4_md5_lookup);
|
||||
|
||||
/* This can be called on a newly created socket, from other files */
|
||||
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
|
||||
int family, u8 prefixlen, int l3index,
|
||||
int family, u8 prefixlen, int l3index, u8 flags,
|
||||
const u8 *newkey, u8 newkeylen, gfp_t gfp)
|
||||
{
|
||||
/* Add Key to the list */
|
||||
@ -1137,7 +1152,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
struct tcp_md5sig_info *md5sig;
|
||||
|
||||
key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
|
||||
key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
|
||||
if (key) {
|
||||
/* Pre-existing entry - just update that one.
|
||||
* Note that the key might be used concurrently.
|
||||
@ -1182,6 +1197,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
|
||||
key->family = family;
|
||||
key->prefixlen = prefixlen;
|
||||
key->l3index = l3index;
|
||||
key->flags = flags;
|
||||
memcpy(&key->addr, addr,
|
||||
(family == AF_INET6) ? sizeof(struct in6_addr) :
|
||||
sizeof(struct in_addr));
|
||||
@ -1191,11 +1207,11 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
|
||||
EXPORT_SYMBOL(tcp_md5_do_add);
|
||||
|
||||
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
|
||||
u8 prefixlen, int l3index)
|
||||
u8 prefixlen, int l3index, u8 flags)
|
||||
{
|
||||
struct tcp_md5sig_key *key;
|
||||
|
||||
key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
|
||||
key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
|
||||
if (!key)
|
||||
return -ENOENT;
|
||||
hlist_del_rcu(&key->node);
|
||||
@ -1229,6 +1245,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
|
||||
const union tcp_md5_addr *addr;
|
||||
u8 prefixlen = 32;
|
||||
int l3index = 0;
|
||||
u8 flags;
|
||||
|
||||
if (optlen < sizeof(cmd))
|
||||
return -EINVAL;
|
||||
@ -1239,6 +1256,8 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
|
||||
if (sin->sin_family != AF_INET)
|
||||
return -EINVAL;
|
||||
|
||||
flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
|
||||
|
||||
if (optname == TCP_MD5SIG_EXT &&
|
||||
cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
|
||||
prefixlen = cmd.tcpm_prefixlen;
|
||||
@ -1246,7 +1265,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (optname == TCP_MD5SIG_EXT &&
|
||||
if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
|
||||
cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
|
||||
struct net_device *dev;
|
||||
|
||||
@ -1267,12 +1286,12 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
|
||||
addr = (union tcp_md5_addr *)&sin->sin_addr.s_addr;
|
||||
|
||||
if (!cmd.tcpm_keylen)
|
||||
return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index);
|
||||
return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index, flags);
|
||||
|
||||
if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
|
||||
return -EINVAL;
|
||||
|
||||
return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index,
|
||||
return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index, flags,
|
||||
cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
|
||||
}
|
||||
|
||||
@ -1596,7 +1615,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
|
||||
* memory, then we end up not copying the key
|
||||
* across. Shucks.
|
||||
*/
|
||||
tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index,
|
||||
tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index, key->flags,
|
||||
key->key, key->keylen, GFP_ATOMIC);
|
||||
sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
|
||||
}
|
||||
|
@@ -2867,6 +2867,9 @@ __poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
!(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
mask &= ~(EPOLLIN | EPOLLRDNORM);

/* psock ingress_msg queue should not contain any bad checksum frames */
if (sk_is_readable(sk))
mask |= EPOLLIN | EPOLLRDNORM;
return mask;

}
@@ -114,6 +114,7 @@ static void udp_bpf_rebuild_protos(struct proto *prot, const struct proto *base)
*prot = *base;
prot->close = sock_map_close;
prot->recvmsg = udp_bpf_recvmsg;
prot->sock_is_readable = sk_msg_is_readable;
}

static void udp_bpf_check_v6_needs_rebuild(struct proto *ops)
@@ -464,13 +464,14 @@ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)

int ip6_forward(struct sk_buff *skb)
{
struct inet6_dev *idev = __in6_dev_get_safely(skb->dev);
struct dst_entry *dst = skb_dst(skb);
struct ipv6hdr *hdr = ipv6_hdr(skb);
struct inet6_skb_parm *opt = IP6CB(skb);
struct net *net = dev_net(dst->dev);
struct inet6_dev *idev;
u32 mtu;

idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
if (net->ipv6.devconf_all->forwarding == 0)
goto error;
@ -25,12 +25,7 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
|
||||
static inline bool
|
||||
segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert)
|
||||
{
|
||||
bool r;
|
||||
pr_debug("segsleft_match:%c 0x%x <= 0x%x <= 0x%x\n",
|
||||
invert ? '!' : ' ', min, id, max);
|
||||
r = (id >= min && id <= max) ^ invert;
|
||||
pr_debug(" result %s\n", r ? "PASS" : "FAILED");
|
||||
return r;
|
||||
return (id >= min && id <= max) ^ invert;
|
||||
}
|
||||
|
||||
static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
|
||||
@ -65,30 +60,6 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
|
||||
return false;
|
||||
}
|
||||
|
||||
pr_debug("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen);
|
||||
pr_debug("TYPE %04X ", rh->type);
|
||||
pr_debug("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left);
|
||||
|
||||
pr_debug("IPv6 RT segsleft %02X ",
|
||||
segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
|
||||
rh->segments_left,
|
||||
!!(rtinfo->invflags & IP6T_RT_INV_SGS)));
|
||||
pr_debug("type %02X %02X %02X ",
|
||||
rtinfo->rt_type, rh->type,
|
||||
(!(rtinfo->flags & IP6T_RT_TYP) ||
|
||||
((rtinfo->rt_type == rh->type) ^
|
||||
!!(rtinfo->invflags & IP6T_RT_INV_TYP))));
|
||||
pr_debug("len %02X %04X %02X ",
|
||||
rtinfo->hdrlen, hdrlen,
|
||||
!(rtinfo->flags & IP6T_RT_LEN) ||
|
||||
((rtinfo->hdrlen == hdrlen) ^
|
||||
!!(rtinfo->invflags & IP6T_RT_INV_LEN)));
|
||||
pr_debug("res %02X %02X %02X ",
|
||||
rtinfo->flags & IP6T_RT_RES,
|
||||
((const struct rt0_hdr *)rh)->reserved,
|
||||
!((rtinfo->flags & IP6T_RT_RES) &&
|
||||
(((const struct rt0_hdr *)rh)->reserved)));
|
||||
|
||||
ret = (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
|
||||
rh->segments_left,
|
||||
!!(rtinfo->invflags & IP6T_RT_INV_SGS))) &&
|
||||
@ -107,22 +78,22 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
|
||||
reserved),
|
||||
sizeof(_reserved),
|
||||
&_reserved);
|
||||
if (!rp) {
|
||||
par->hotdrop = true;
|
||||
return false;
|
||||
}
|
||||
|
||||
ret = (*rp == 0);
|
||||
}
|
||||
|
||||
pr_debug("#%d ", rtinfo->addrnr);
|
||||
if (!(rtinfo->flags & IP6T_RT_FST)) {
|
||||
return ret;
|
||||
} else if (rtinfo->flags & IP6T_RT_FST_NSTRICT) {
|
||||
pr_debug("Not strict ");
|
||||
if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
|
||||
pr_debug("There isn't enough space\n");
|
||||
return false;
|
||||
} else {
|
||||
unsigned int i = 0;
|
||||
|
||||
pr_debug("#%d ", rtinfo->addrnr);
|
||||
for (temp = 0;
|
||||
temp < (unsigned int)((hdrlen - 8) / 16);
|
||||
temp++) {
|
||||
@ -138,26 +109,20 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
|
||||
return false;
|
||||
}
|
||||
|
||||
if (ipv6_addr_equal(ap, &rtinfo->addrs[i])) {
|
||||
pr_debug("i=%d temp=%d;\n", i, temp);
|
||||
if (ipv6_addr_equal(ap, &rtinfo->addrs[i]))
|
||||
i++;
|
||||
}
|
||||
if (i == rtinfo->addrnr)
|
||||
break;
|
||||
}
|
||||
pr_debug("i=%d #%d\n", i, rtinfo->addrnr);
|
||||
if (i == rtinfo->addrnr)
|
||||
return ret;
|
||||
else
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
pr_debug("Strict ");
|
||||
if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
|
||||
pr_debug("There isn't enough space\n");
|
||||
return false;
|
||||
} else {
|
||||
pr_debug("#%d ", rtinfo->addrnr);
|
||||
for (temp = 0; temp < rtinfo->addrnr; temp++) {
|
||||
ap = skb_header_pointer(skb,
|
||||
ptr
|
||||
@ -173,7 +138,6 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
|
||||
if (!ipv6_addr_equal(ap, &rtinfo->addrs[temp]))
|
||||
break;
|
||||
}
|
||||
pr_debug("temp=%d #%d\n", temp, rtinfo->addrnr);
|
||||
if (temp == rtinfo->addrnr &&
|
||||
temp == (unsigned int)((hdrlen - 8) / 16))
|
||||
return ret;
|
||||
|
@ -599,6 +599,7 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
|
||||
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
|
||||
int l3index = 0;
|
||||
u8 prefixlen;
|
||||
u8 flags;
|
||||
|
||||
if (optlen < sizeof(cmd))
|
||||
return -EINVAL;
|
||||
@ -609,6 +610,8 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
|
||||
if (sin6->sin6_family != AF_INET6)
|
||||
return -EINVAL;
|
||||
|
||||
flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
|
||||
|
||||
if (optname == TCP_MD5SIG_EXT &&
|
||||
cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
|
||||
prefixlen = cmd.tcpm_prefixlen;
|
||||
@ -619,7 +622,7 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
|
||||
prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
|
||||
}
|
||||
|
||||
if (optname == TCP_MD5SIG_EXT &&
|
||||
if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
|
||||
cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
|
||||
struct net_device *dev;
|
||||
|
||||
@ -640,9 +643,9 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
|
||||
if (ipv6_addr_v4mapped(&sin6->sin6_addr))
|
||||
return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
|
||||
AF_INET, prefixlen,
|
||||
l3index);
|
||||
l3index, flags);
|
||||
return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
|
||||
AF_INET6, prefixlen, l3index);
|
||||
AF_INET6, prefixlen, l3index, flags);
|
||||
}
|
||||
|
||||
if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
|
||||
@ -650,12 +653,12 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
|
||||
|
||||
if (ipv6_addr_v4mapped(&sin6->sin6_addr))
|
||||
return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
|
||||
AF_INET, prefixlen, l3index,
|
||||
AF_INET, prefixlen, l3index, flags,
|
||||
cmd.tcpm_key, cmd.tcpm_keylen,
|
||||
GFP_KERNEL);
|
||||
|
||||
return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
|
||||
AF_INET6, prefixlen, l3index,
|
||||
AF_INET6, prefixlen, l3index, flags,
|
||||
cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
|
||||
}
|
||||
|
||||
@ -1404,7 +1407,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
|
||||
* across. Shucks.
|
||||
*/
|
||||
tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
|
||||
AF_INET6, 128, l3index, key->key, key->keylen,
|
||||
AF_INET6, 128, l3index, key->flags, key->key, key->keylen,
|
||||
sk_gfp_mask(sk, GFP_ATOMIC));
|
||||
}
|
||||
#endif
|
||||
|
@@ -672,7 +672,7 @@ ieee80211_mesh_update_bss_params(struct ieee80211_sub_if_data *sdata,
u8 *ie, u8 ie_len)
{
struct ieee80211_supported_band *sband;
const u8 *cap;
const struct element *cap;
const struct ieee80211_he_operation *he_oper = NULL;

sband = ieee80211_get_sband(sdata);
@@ -687,9 +687,10 @@ ieee80211_mesh_update_bss_params(struct ieee80211_sub_if_data *sdata,

sdata->vif.bss_conf.he_support = true;

cap = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION, ie, ie_len);
if (cap && cap[1] >= ieee80211_he_oper_size(&cap[3]))
he_oper = (void *)(cap + 3);
cap = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ie_len);
if (cap && cap->datalen >= 1 + sizeof(*he_oper) &&
cap->datalen >= 1 + ieee80211_he_oper_size(cap->data + 1))
he_oper = (void *)(cap->data + 1);

if (he_oper)
sdata->vif.bss_conf.he_oper.params =
@ -485,11 +485,11 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
|
||||
mpext = mptcp_get_ext(skb);
|
||||
data_len = mpext ? mpext->data_len : 0;
|
||||
|
||||
/* we will check ext_copy.data_len in mptcp_write_options() to
|
||||
/* we will check ops->data_len in mptcp_write_options() to
|
||||
* discriminate between TCPOLEN_MPTCP_MPC_ACK_DATA and
|
||||
* TCPOLEN_MPTCP_MPC_ACK
|
||||
*/
|
||||
opts->ext_copy.data_len = data_len;
|
||||
opts->data_len = data_len;
|
||||
opts->suboptions = OPTION_MPTCP_MPC_ACK;
|
||||
opts->sndr_key = subflow->local_key;
|
||||
opts->rcvr_key = subflow->remote_key;
|
||||
@ -505,9 +505,9 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
|
||||
len = TCPOLEN_MPTCP_MPC_ACK_DATA;
|
||||
if (opts->csum_reqd) {
|
||||
/* we need to propagate more info to csum the pseudo hdr */
|
||||
opts->ext_copy.data_seq = mpext->data_seq;
|
||||
opts->ext_copy.subflow_seq = mpext->subflow_seq;
|
||||
opts->ext_copy.csum = mpext->csum;
|
||||
opts->data_seq = mpext->data_seq;
|
||||
opts->subflow_seq = mpext->subflow_seq;
|
||||
opts->csum = mpext->csum;
|
||||
len += TCPOLEN_MPTCP_DSS_CHECKSUM;
|
||||
}
|
||||
*size = ALIGN(len, 4);
|
||||
@ -1227,7 +1227,7 @@ static void mptcp_set_rwin(const struct tcp_sock *tp)
|
||||
WRITE_ONCE(msk->rcv_wnd_sent, ack_seq);
|
||||
}
|
||||
|
||||
static u16 mptcp_make_csum(const struct mptcp_ext *mpext)
|
||||
static u16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __sum16 sum)
|
||||
{
|
||||
struct csum_pseudo_header header;
|
||||
__wsum csum;
|
||||
@ -1237,15 +1237,21 @@ static u16 mptcp_make_csum(const struct mptcp_ext *mpext)
|
||||
* always the 64-bit value, irrespective of what length is used in the
|
||||
* DSS option itself.
|
||||
*/
|
||||
header.data_seq = cpu_to_be64(mpext->data_seq);
|
||||
header.subflow_seq = htonl(mpext->subflow_seq);
|
||||
header.data_len = htons(mpext->data_len);
|
||||
header.data_seq = cpu_to_be64(data_seq);
|
||||
header.subflow_seq = htonl(subflow_seq);
|
||||
header.data_len = htons(data_len);
|
||||
header.csum = 0;
|
||||
|
||||
csum = csum_partial(&header, sizeof(header), ~csum_unfold(mpext->csum));
|
||||
csum = csum_partial(&header, sizeof(header), ~csum_unfold(sum));
|
||||
return (__force u16)csum_fold(csum);
|
||||
}
|
||||
|
||||
static u16 mptcp_make_csum(const struct mptcp_ext *mpext)
|
||||
{
|
||||
return __mptcp_make_csum(mpext->data_seq, mpext->subflow_seq, mpext->data_len,
|
||||
mpext->csum);
|
||||
}
|
||||
|
||||
void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
|
||||
struct mptcp_out_options *opts)
|
||||
{
|
||||
@ -1337,7 +1343,7 @@ void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
|
||||
len = TCPOLEN_MPTCP_MPC_SYN;
|
||||
} else if (OPTION_MPTCP_MPC_SYNACK & opts->suboptions) {
|
||||
len = TCPOLEN_MPTCP_MPC_SYNACK;
|
||||
} else if (opts->ext_copy.data_len) {
|
||||
} else if (opts->data_len) {
|
||||
len = TCPOLEN_MPTCP_MPC_ACK_DATA;
|
||||
if (opts->csum_reqd)
|
||||
len += TCPOLEN_MPTCP_DSS_CHECKSUM;
|
||||
@ -1366,14 +1372,17 @@ void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
|
||||
|
||||
put_unaligned_be64(opts->rcvr_key, ptr);
|
||||
ptr += 2;
|
||||
if (!opts->ext_copy.data_len)
|
||||
if (!opts->data_len)
|
||||
goto mp_capable_done;
|
||||
|
||||
if (opts->csum_reqd) {
|
||||
put_unaligned_be32(opts->ext_copy.data_len << 16 |
|
||||
mptcp_make_csum(&opts->ext_copy), ptr);
|
||||
put_unaligned_be32(opts->data_len << 16 |
|
||||
__mptcp_make_csum(opts->data_seq,
|
||||
opts->subflow_seq,
|
||||
opts->data_len,
|
||||
opts->csum), ptr);
|
||||
} else {
|
||||
put_unaligned_be32(opts->ext_copy.data_len << 16 |
|
||||
put_unaligned_be32(opts->data_len << 16 |
|
||||
TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
|
||||
}
|
||||
ptr += 1;
|
||||
|
@@ -4090,6 +4090,11 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs)
tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode;
tbl[idx++].data = &ipvs->sysctl_schedule_icmp;
tbl[idx++].data = &ipvs->sysctl_ignore_tunneled;
#ifdef CONFIG_IP_VS_DEBUG
/* Global sysctls must be ro in non-init netns */
if (!net_eq(net, &init_net))
tbl[idx++].mode = 0444;
#endif

ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
if (ipvs->sysctl_hdr == NULL) {
@@ -342,12 +342,6 @@ static void nft_netdev_event(unsigned long event, struct net_device *dev,
return;
}

/* UNREGISTER events are also happening on netns exit.
*
* Although nf_tables core releases all tables/chains, only this event
* handler provides guarantee that hook->ops.dev is still accessible,
* so we cannot skip exiting net namespaces.
*/
__nft_release_basechain(ctx);
}

@@ -366,6 +360,9 @@ static int nf_tables_netdev_event(struct notifier_block *this,
event != NETDEV_CHANGENAME)
return NOTIFY_DONE;

if (!check_net(ctx.net))
return NOTIFY_DONE;

nft_net = nft_pernet(ctx.net);
mutex_lock(&nft_net->commit_mutex);
list_for_each_entry(table, &nft_net->tables, list) {
@ -156,6 +156,12 @@ static enum sctp_disposition __sctp_sf_do_9_1_abort(
|
||||
void *arg,
|
||||
struct sctp_cmd_seq *commands);
|
||||
|
||||
static enum sctp_disposition
|
||||
__sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep,
|
||||
const struct sctp_association *asoc,
|
||||
const union sctp_subtype type, void *arg,
|
||||
struct sctp_cmd_seq *commands);
|
||||
|
||||
/* Small helper function that checks if the chunk length
|
||||
* is of the appropriate length. The 'required_length' argument
|
||||
* is set to be the size of a specific chunk we are testing.
|
||||
@ -337,6 +343,14 @@ enum sctp_disposition sctp_sf_do_5_1B_init(struct net *net,
|
||||
if (!chunk->singleton)
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
|
||||
/* Make sure that the INIT chunk has a valid length.
|
||||
* Normally, this would cause an ABORT with a Protocol Violation
|
||||
* error, but since we don't have an association, we'll
|
||||
* just discard the packet.
|
||||
*/
|
||||
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
|
||||
/* If the packet is an OOTB packet which is temporarily on the
|
||||
* control endpoint, respond with an ABORT.
|
||||
*/
|
||||
@ -351,14 +365,6 @@ enum sctp_disposition sctp_sf_do_5_1B_init(struct net *net,
|
||||
if (chunk->sctp_hdr->vtag != 0)
|
||||
return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
|
||||
|
||||
/* Make sure that the INIT chunk has a valid length.
|
||||
* Normally, this would cause an ABORT with a Protocol Violation
|
||||
* error, but since we don't have an association, we'll
|
||||
* just discard the packet.
|
||||
*/
|
||||
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
|
||||
/* If the INIT is coming toward a closing socket, we'll send back
|
||||
* and ABORT. Essentially, this catches the race of INIT being
|
||||
* backloged to the socket at the same time as the user issues close().
|
||||
@ -704,6 +710,9 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
|
||||
struct sock *sk;
|
||||
int error = 0;
|
||||
|
||||
if (asoc && !sctp_vtag_verify(chunk, asoc))
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
|
||||
/* If the packet is an OOTB packet which is temporarily on the
|
||||
* control endpoint, respond with an ABORT.
|
||||
*/
|
||||
@ -718,7 +727,8 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
|
||||
* in sctp_unpack_cookie().
|
||||
*/
|
||||
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
|
||||
commands);
|
||||
|
||||
/* If the endpoint is not listening or if the number of associations
|
||||
* on the TCP-style socket exceed the max backlog, respond with an
|
||||
@ -1524,20 +1534,16 @@ static enum sctp_disposition sctp_sf_do_unexpected_init(
|
||||
if (!chunk->singleton)
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
|
||||
/* Make sure that the INIT chunk has a valid length. */
|
||||
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
|
||||
/* 3.1 A packet containing an INIT chunk MUST have a zero Verification
|
||||
* Tag.
|
||||
*/
|
||||
if (chunk->sctp_hdr->vtag != 0)
|
||||
return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
|
||||
|
||||
/* Make sure that the INIT chunk has a valid length.
|
||||
* In this case, we generate a protocol violation since we have
|
||||
* an association established.
|
||||
*/
|
||||
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
|
||||
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
|
||||
commands);
|
||||
|
||||
if (SCTP_INPUT_CB(chunk->skb)->encap_port != chunk->transport->encap_port)
|
||||
return sctp_sf_new_encap_port(net, ep, asoc, type, arg, commands);
|
||||
|
||||
@ -1882,9 +1888,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
|
||||
* its peer.
|
||||
*/
|
||||
if (sctp_state(asoc, SHUTDOWN_ACK_SENT)) {
|
||||
disposition = sctp_sf_do_9_2_reshutack(net, ep, asoc,
|
||||
SCTP_ST_CHUNK(chunk->chunk_hdr->type),
|
||||
chunk, commands);
|
||||
disposition = __sctp_sf_do_9_2_reshutack(net, ep, asoc,
|
||||
SCTP_ST_CHUNK(chunk->chunk_hdr->type),
|
||||
chunk, commands);
|
||||
if (SCTP_DISPOSITION_NOMEM == disposition)
|
||||
goto nomem;
|
||||
|
||||
@ -2202,9 +2208,11 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
|
||||
* enough for the chunk header. Cookie length verification is
|
||||
* done later.
|
||||
*/
|
||||
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
|
||||
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
|
||||
commands);
|
||||
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr))) {
|
||||
if (!sctp_vtag_verify(chunk, asoc))
|
||||
asoc = NULL;
|
||||
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands);
|
||||
}
|
||||
|
||||
/* "Decode" the chunk. We have no optional parameters so we
|
||||
* are in good shape.
|
||||
@ -2341,7 +2349,7 @@ enum sctp_disposition sctp_sf_shutdown_pending_abort(
|
||||
*/
|
||||
if (SCTP_ADDR_DEL ==
|
||||
sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
|
||||
return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
|
||||
if (!sctp_err_chunk_valid(chunk))
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
@ -2387,7 +2395,7 @@ enum sctp_disposition sctp_sf_shutdown_sent_abort(
|
||||
*/
|
||||
if (SCTP_ADDR_DEL ==
|
||||
sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
|
||||
return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
|
||||
if (!sctp_err_chunk_valid(chunk))
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
@ -2657,7 +2665,7 @@ enum sctp_disposition sctp_sf_do_9_1_abort(
|
||||
*/
|
||||
if (SCTP_ADDR_DEL ==
|
||||
sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
|
||||
return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
|
||||
if (!sctp_err_chunk_valid(chunk))
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
@ -2970,13 +2978,11 @@ enum sctp_disposition sctp_sf_do_9_2_shut_ctsn(
|
||||
* that belong to this association, it should discard the INIT chunk and
|
||||
* retransmit the SHUTDOWN ACK chunk.
|
||||
*/
|
||||
enum sctp_disposition sctp_sf_do_9_2_reshutack(
|
||||
struct net *net,
|
||||
const struct sctp_endpoint *ep,
|
||||
const struct sctp_association *asoc,
|
||||
const union sctp_subtype type,
|
||||
void *arg,
|
||||
struct sctp_cmd_seq *commands)
|
||||
static enum sctp_disposition
|
||||
__sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep,
|
||||
const struct sctp_association *asoc,
|
||||
const union sctp_subtype type, void *arg,
|
||||
struct sctp_cmd_seq *commands)
|
||||
{
|
||||
struct sctp_chunk *chunk = arg;
|
||||
struct sctp_chunk *reply;
|
||||
@ -3010,6 +3016,26 @@ enum sctp_disposition sctp_sf_do_9_2_reshutack(
|
||||
return SCTP_DISPOSITION_NOMEM;
|
||||
}
|
||||
|
||||
enum sctp_disposition
|
||||
sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep,
|
||||
const struct sctp_association *asoc,
|
||||
const union sctp_subtype type, void *arg,
|
||||
struct sctp_cmd_seq *commands)
|
||||
{
|
||||
struct sctp_chunk *chunk = arg;
|
||||
|
||||
if (!chunk->singleton)
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
|
||||
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
|
||||
if (chunk->sctp_hdr->vtag != 0)
|
||||
return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
|
||||
|
||||
return __sctp_sf_do_9_2_reshutack(net, ep, asoc, type, arg, commands);
|
||||
}
|
||||
|
||||
/*
|
||||
* sctp_sf_do_ecn_cwr
|
||||
*
|
||||
@ -3662,6 +3688,9 @@ enum sctp_disposition sctp_sf_ootb(struct net *net,
|
||||
|
||||
SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
|
||||
|
||||
if (asoc && !sctp_vtag_verify(chunk, asoc))
|
||||
asoc = NULL;
|
||||
|
||||
ch = (struct sctp_chunkhdr *)chunk->chunk_hdr;
|
||||
do {
|
||||
/* Report violation if the chunk is less then minimal */
|
||||
@ -3777,12 +3806,6 @@ static enum sctp_disposition sctp_sf_shut_8_4_5(
|
||||
|
||||
SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
|
||||
|
||||
/* If the chunk length is invalid, we don't want to process
|
||||
* the reset of the packet.
|
||||
*/
|
||||
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
|
||||
/* We need to discard the rest of the packet to prevent
|
||||
* potential boomming attacks from additional bundled chunks.
|
||||
* This is documented in SCTP Threats ID.
|
||||
@ -3810,6 +3833,9 @@ enum sctp_disposition sctp_sf_do_8_5_1_E_sa(struct net *net,
|
||||
{
|
||||
struct sctp_chunk *chunk = arg;
|
||||
|
||||
if (!sctp_vtag_verify(chunk, asoc))
|
||||
asoc = NULL;
|
||||
|
||||
/* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
|
||||
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
|
||||
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
|
||||
@ -3845,6 +3871,11 @@ enum sctp_disposition sctp_sf_do_asconf(struct net *net,
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
}
|
||||
|
||||
/* Make sure that the ASCONF ADDIP chunk has a valid length. */
|
||||
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_addip_chunk)))
|
||||
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
|
||||
commands);
|
||||
|
||||
/* ADD-IP: Section 4.1.1
|
||||
* This chunk MUST be sent in an authenticated way by using
|
||||
* the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk
|
||||
@ -3853,13 +3884,7 @@ enum sctp_disposition sctp_sf_do_asconf(struct net *net,
|
||||
*/
|
||||
if (!asoc->peer.asconf_capable ||
|
||||
(!net->sctp.addip_noauth && !chunk->auth))
|
||||
return sctp_sf_discard_chunk(net, ep, asoc, type, arg,
|
||||
commands);
|
||||
|
||||
/* Make sure that the ASCONF ADDIP chunk has a valid length. */
|
||||
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_addip_chunk)))
|
||||
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
|
||||
commands);
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
|
||||
hdr = (struct sctp_addiphdr *)chunk->skb->data;
|
||||
serial = ntohl(hdr->serial);
|
||||
@ -3988,6 +4013,12 @@ enum sctp_disposition sctp_sf_do_asconf_ack(struct net *net,
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
}
|
||||
|
||||
/* Make sure that the ADDIP chunk has a valid length. */
|
||||
if (!sctp_chunk_length_valid(asconf_ack,
|
||||
sizeof(struct sctp_addip_chunk)))
|
||||
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
|
||||
commands);
|
||||
|
||||
/* ADD-IP, Section 4.1.2:
|
||||
* This chunk MUST be sent in an authenticated way by using
|
||||
* the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk
|
||||
@ -3996,14 +4027,7 @@ enum sctp_disposition sctp_sf_do_asconf_ack(struct net *net,
|
||||
*/
|
||||
if (!asoc->peer.asconf_capable ||
|
||||
(!net->sctp.addip_noauth && !asconf_ack->auth))
|
||||
return sctp_sf_discard_chunk(net, ep, asoc, type, arg,
|
||||
commands);
|
||||
|
||||
/* Make sure that the ADDIP chunk has a valid length. */
|
||||
if (!sctp_chunk_length_valid(asconf_ack,
|
||||
sizeof(struct sctp_addip_chunk)))
|
||||
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
|
||||
commands);
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
|
||||
addip_hdr = (struct sctp_addiphdr *)asconf_ack->skb->data;
|
||||
rcvd_serial = ntohl(addip_hdr->serial);
|
||||
@ -4575,6 +4599,9 @@ enum sctp_disposition sctp_sf_discard_chunk(struct net *net,
|
||||
{
|
||||
struct sctp_chunk *chunk = arg;
|
||||
|
||||
if (asoc && !sctp_vtag_verify(chunk, asoc))
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
|
||||
/* Make sure that the chunk has a valid length.
|
||||
* Since we don't know the chunk type, we use a general
|
||||
* chunkhdr structure to make a comparison.
|
||||
@ -4642,6 +4669,9 @@ enum sctp_disposition sctp_sf_violation(struct net *net,
|
||||
{
|
||||
struct sctp_chunk *chunk = arg;
|
||||
|
||||
if (!sctp_vtag_verify(chunk, asoc))
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
|
||||
/* Make sure that the chunk has a valid length. */
|
||||
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
|
||||
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
|
||||
@ -6348,6 +6378,7 @@ static struct sctp_packet *sctp_ootb_pkt_new(
|
||||
* yet.
|
||||
*/
|
||||
switch (chunk->chunk_hdr->type) {
|
||||
case SCTP_CID_INIT:
|
||||
case SCTP_CID_INIT_ACK:
|
||||
{
|
||||
struct sctp_initack_chunk *initack;
|
||||
|
@@ -1057,7 +1057,7 @@ static void smc_connect_work(struct work_struct *work)
if (smc->clcsock->sk->sk_err) {
smc->sk.sk_err = smc->clcsock->sk->sk_err;
} else if ((1 << smc->clcsock->sk->sk_state) &
(TCPF_SYN_SENT | TCP_SYN_RECV)) {
(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
if ((rc == -EPIPE) &&
((1 << smc->clcsock->sk->sk_state) &
@@ -1822,7 +1822,7 @@ void smc_llc_link_active(struct smc_link *link)
link->smcibdev->ibdev->name, link->ibport);
link->state = SMC_LNK_ACTIVE;
if (link->lgr->llc_testlink_time) {
link->llc_testlink_time = link->lgr->llc_testlink_time * HZ;
link->llc_testlink_time = link->lgr->llc_testlink_time;
schedule_delayed_work(&link->llc_testlink_wrk,
link->llc_testlink_time);
}
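The smc_llc_link_active() fix removes a second multiplication by HZ: the change implies llc_testlink_time is already held as a jiffies count, so scaling it again made the TESTLINK interval HZ times too long. When a delay really does arrive in human units, the conversion belongs at that boundary, as in this hedged, generic sketch (not SMC code):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void testlink_fn(struct work_struct *work)
{
	/* periodic keep-alive body omitted */
}

static DECLARE_DELAYED_WORK(testlink_wrk, testlink_fn);

/* Sketch: schedule_delayed_work() wants jiffies; convert exactly once. */
static void arm_testlink(unsigned int interval_ms)
{
	schedule_delayed_work(&testlink_wrk, msecs_to_jiffies(interval_ms));
}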
@ -2285,43 +2285,53 @@ static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr)
|
||||
u16 key_gen = msg_key_gen(hdr);
|
||||
u16 size = msg_data_sz(hdr);
|
||||
u8 *data = msg_data(hdr);
|
||||
unsigned int keylen;
|
||||
|
||||
/* Verify whether the size can exist in the packet */
|
||||
if (unlikely(size < sizeof(struct tipc_aead_key) + TIPC_AEAD_KEYLEN_MIN)) {
|
||||
pr_debug("%s: message data size is too small\n", rx->name);
|
||||
goto exit;
|
||||
}
|
||||
|
||||
keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME)));
|
||||
|
||||
/* Verify the supplied size values */
|
||||
if (unlikely(size != keylen + sizeof(struct tipc_aead_key) ||
|
||||
keylen > TIPC_AEAD_KEY_SIZE_MAX)) {
|
||||
pr_debug("%s: invalid MSG_CRYPTO key size\n", rx->name);
|
||||
goto exit;
|
||||
}
|
||||
|
||||
spin_lock(&rx->lock);
|
||||
if (unlikely(rx->skey || (key_gen == rx->key_gen && rx->key.keys))) {
|
||||
pr_err("%s: key existed <%p>, gen %d vs %d\n", rx->name,
|
||||
rx->skey, key_gen, rx->key_gen);
|
||||
goto exit;
|
||||
goto exit_unlock;
|
||||
}
|
||||
|
||||
/* Allocate memory for the key */
|
||||
skey = kmalloc(size, GFP_ATOMIC);
|
||||
if (unlikely(!skey)) {
|
||||
pr_err("%s: unable to allocate memory for skey\n", rx->name);
|
||||
goto exit;
|
||||
goto exit_unlock;
|
||||
}
|
||||
|
||||
/* Copy key from msg data */
|
||||
skey->keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME)));
|
||||
skey->keylen = keylen;
|
||||
memcpy(skey->alg_name, data, TIPC_AEAD_ALG_NAME);
|
||||
memcpy(skey->key, data + TIPC_AEAD_ALG_NAME + sizeof(__be32),
|
||||
skey->keylen);
|
||||
|
||||
/* Sanity check */
|
||||
if (unlikely(size != tipc_aead_key_size(skey))) {
|
||||
kfree(skey);
|
||||
skey = NULL;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
rx->key_gen = key_gen;
|
||||
rx->skey_mode = msg_key_mode(hdr);
|
||||
rx->skey = skey;
|
||||
rx->nokey = 0;
|
||||
mb(); /* for nokey flag */
|
||||
|
||||
exit:
|
||||
exit_unlock:
|
||||
spin_unlock(&rx->lock);
|
||||
|
||||
exit:
|
||||
/* Schedule the key attaching on this crypto */
|
||||
if (likely(skey && queue_delayed_work(tx->wq, &rx->work, 0)))
|
||||
return true;
|
||||
|
@@ -681,12 +681,12 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],

prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
prot[TLS_BASE][TLS_SW].sock_is_readable = tls_sw_sock_is_readable;
prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;

prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read;
prot[TLS_SW][TLS_SW].sock_is_readable = tls_sw_sock_is_readable;
prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;

#ifdef CONFIG_TLS_DEVICE
@ -35,6 +35,7 @@
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/bug.h>
|
||||
#include <linux/sched/signal.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/splice.h>
|
||||
@ -43,6 +44,14 @@
|
||||
#include <net/strparser.h>
|
||||
#include <net/tls.h>
|
||||
|
||||
noinline void tls_err_abort(struct sock *sk, int err)
|
||||
{
|
||||
WARN_ON_ONCE(err >= 0);
|
||||
/* sk->sk_err should contain a positive error code. */
|
||||
sk->sk_err = -err;
|
||||
sk_error_report(sk);
|
||||
}
|
||||
|
||||
static int __skb_nsg(struct sk_buff *skb, int offset, int len,
|
||||
unsigned int recursion_level)
|
||||
{
|
||||
@ -419,7 +428,7 @@ int tls_tx_records(struct sock *sk, int flags)
|
||||
|
||||
tx_err:
|
||||
if (rc < 0 && rc != -EAGAIN)
|
||||
tls_err_abort(sk, EBADMSG);
|
||||
tls_err_abort(sk, -EBADMSG);
|
||||
|
||||
return rc;
|
||||
}
|
||||
@ -450,7 +459,7 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
|
||||
|
||||
/* If err is already set on socket, return the same code */
|
||||
if (sk->sk_err) {
|
||||
ctx->async_wait.err = sk->sk_err;
|
||||
ctx->async_wait.err = -sk->sk_err;
|
||||
} else {
|
||||
ctx->async_wait.err = err;
|
||||
tls_err_abort(sk, err);
|
||||
@ -763,7 +772,7 @@ static int tls_push_record(struct sock *sk, int flags,
|
||||
msg_pl->sg.size + prot->tail_size, i);
|
||||
if (rc < 0) {
|
||||
if (rc != -EINPROGRESS) {
|
||||
tls_err_abort(sk, EBADMSG);
|
||||
tls_err_abort(sk, -EBADMSG);
|
||||
if (split) {
|
||||
tls_ctx->pending_open_record_frags = true;
|
||||
tls_merge_open_record(sk, rec, tmp, orig_end);
|
||||
@ -1827,7 +1836,7 @@ int tls_sw_recvmsg(struct sock *sk,
|
||||
err = decrypt_skb_update(sk, skb, &msg->msg_iter,
|
||||
&chunk, &zc, async_capable);
|
||||
if (err < 0 && err != -EINPROGRESS) {
|
||||
tls_err_abort(sk, EBADMSG);
|
||||
tls_err_abort(sk, -EBADMSG);
|
||||
goto recv_end;
|
||||
}
|
||||
|
||||
@ -2007,7 +2016,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
|
||||
}
|
||||
|
||||
if (err < 0) {
|
||||
tls_err_abort(sk, EBADMSG);
|
||||
tls_err_abort(sk, -EBADMSG);
|
||||
goto splice_read_end;
|
||||
}
|
||||
ctx->decrypted = 1;
|
||||
@ -2026,7 +2035,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
|
||||
return copied ? : err;
|
||||
}
|
||||
|
||||
bool tls_sw_stream_read(const struct sock *sk)
|
||||
bool tls_sw_sock_is_readable(struct sock *sk)
|
||||
{
|
||||
struct tls_context *tls_ctx = tls_get_ctx(sk);
|
||||
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
|
||||
|
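[Editor's note] The tls_sw hunks above change tls_err_abort() to expect a negative errno (hence the WARN_ON_ONCE(err >= 0)) and update every caller from EBADMSG to -EBADMSG. A minimal standalone sketch of that sign convention, using a stand-in report function since sk_error_report() is kernel-internal:

#include <errno.h>
#include <stdio.h>
#include <assert.h>

struct sock { int sk_err; };

/* stand-in for sk_error_report(); the real one wakes up waiters */
static void sk_error_report(struct sock *sk)
{
	printf("socket error reported: %d\n", sk->sk_err);
}

/* callers pass a negative errno; sk_err stores the positive value */
static void tls_err_abort(struct sock *sk, int err)
{
	assert(err < 0);        /* mirrors WARN_ON_ONCE(err >= 0) */
	sk->sk_err = -err;
	sk_error_report(sk);
}

int main(void)
{
	struct sock sk = { 0 };
	tls_err_abort(&sk, -EBADMSG);   /* note: -EBADMSG, not EBADMSG */
	return 0;
}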
@ -3052,6 +3052,8 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
/* readable? */
if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
mask |= EPOLLIN | EPOLLRDNORM;
if (sk_is_readable(sk))
mask |= EPOLLIN | EPOLLRDNORM;

/* Connection-based need to check for termination and startup */
if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&

@ -3091,6 +3093,8 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
/* readable? */
if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
mask |= EPOLLIN | EPOLLRDNORM;
if (sk_is_readable(sk))
mask |= EPOLLIN | EPOLLRDNORM;

/* Connection-based need to check for termination and startup */
if (sk->sk_type == SOCK_SEQPACKET) {

@ -102,6 +102,7 @@ static void unix_dgram_bpf_rebuild_protos(struct proto *prot, const struct proto
*prot = *base;
prot->close = sock_map_close;
prot->recvmsg = unix_bpf_recvmsg;
prot->sock_is_readable = sk_msg_is_readable;
}

static void unix_stream_bpf_rebuild_protos(struct proto *prot,

@ -110,6 +111,7 @@ static void unix_stream_bpf_rebuild_protos(struct proto *prot,
*prot = *base;
prot->close = sock_map_close;
prot->recvmsg = unix_bpf_recvmsg;
prot->sock_is_readable = sk_msg_is_readable;
prot->unhash = sock_map_unhash;
}
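[Editor's note] The af_unix and unix_bpf hunks above add an sk_is_readable() check to the poll handlers and wire up a sock_is_readable callback in the BPF-rebuilt protos, so data parked outside sk_receive_queue can still flag EPOLLIN. A simplified sketch of that "queue or protocol callback" readability test, using plain C stand-ins rather than the kernel structs:

#include <stdbool.h>
#include <stddef.h>

#define EPOLLIN     0x001
#define EPOLLRDNORM 0x040

struct sock {
	int  queued;                                 /* packets on the receive queue */
	bool (*sock_is_readable)(struct sock *sk);   /* optional protocol hook */
};

static bool sk_is_readable(struct sock *sk)
{
	return sk->sock_is_readable && sk->sock_is_readable(sk);
}

/* poll: readable if the queue has data or the protocol says so */
static unsigned int poll_mask(struct sock *sk)
{
	unsigned int mask = 0;

	if (sk->queued > 0)
		mask |= EPOLLIN | EPOLLRDNORM;
	if (sk_is_readable(sk))
		mask |= EPOLLIN | EPOLLRDNORM;
	return mask;
}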
@ -100,6 +100,8 @@ struct cfg80211_registered_device {
struct work_struct propagate_cac_done_wk;

struct work_struct mgmt_registrations_update_wk;
/* lock for all wdev lists */
spinlock_t mgmt_registrations_lock;

/* must be last because of the way we do wiphy_priv(),
* and it should at least be aligned to NETDEV_ALIGN */

@ -452,9 +452,9 @@ static void cfg80211_mgmt_registrations_update(struct wireless_dev *wdev)

lockdep_assert_held(&rdev->wiphy.mtx);

spin_lock_bh(&wdev->mgmt_registrations_lock);
spin_lock_bh(&rdev->mgmt_registrations_lock);
if (!wdev->mgmt_registrations_need_update) {
spin_unlock_bh(&wdev->mgmt_registrations_lock);
spin_unlock_bh(&rdev->mgmt_registrations_lock);
return;
}

@ -479,7 +479,7 @@ static void cfg80211_mgmt_registrations_update(struct wireless_dev *wdev)
rcu_read_unlock();

wdev->mgmt_registrations_need_update = 0;
spin_unlock_bh(&wdev->mgmt_registrations_lock);
spin_unlock_bh(&rdev->mgmt_registrations_lock);

rdev_update_mgmt_frame_registrations(rdev, wdev, &upd);
}

@ -503,6 +503,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
int match_len, bool multicast_rx,
struct netlink_ext_ack *extack)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
struct cfg80211_mgmt_registration *reg, *nreg;
int err = 0;
u16 mgmt_type;

@ -548,7 +549,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
if (!nreg)
return -ENOMEM;

spin_lock_bh(&wdev->mgmt_registrations_lock);
spin_lock_bh(&rdev->mgmt_registrations_lock);

list_for_each_entry(reg, &wdev->mgmt_registrations, list) {
int mlen = min(match_len, reg->match_len);

@ -583,7 +584,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
list_add(&nreg->list, &wdev->mgmt_registrations);
}
wdev->mgmt_registrations_need_update = 1;
spin_unlock_bh(&wdev->mgmt_registrations_lock);
spin_unlock_bh(&rdev->mgmt_registrations_lock);

cfg80211_mgmt_registrations_update(wdev);

@ -591,7 +592,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,

out:
kfree(nreg);
spin_unlock_bh(&wdev->mgmt_registrations_lock);
spin_unlock_bh(&rdev->mgmt_registrations_lock);

return err;
}

@ -602,7 +603,7 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid)
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
struct cfg80211_mgmt_registration *reg, *tmp;

spin_lock_bh(&wdev->mgmt_registrations_lock);
spin_lock_bh(&rdev->mgmt_registrations_lock);

list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) {
if (reg->nlportid != nlportid)

@ -615,7 +616,7 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid)
schedule_work(&rdev->mgmt_registrations_update_wk);
}

spin_unlock_bh(&wdev->mgmt_registrations_lock);
spin_unlock_bh(&rdev->mgmt_registrations_lock);

if (nlportid && rdev->crit_proto_nlportid == nlportid) {
rdev->crit_proto_nlportid = 0;

@ -628,15 +629,16 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid)

void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
struct cfg80211_mgmt_registration *reg, *tmp;

spin_lock_bh(&wdev->mgmt_registrations_lock);
spin_lock_bh(&rdev->mgmt_registrations_lock);
list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) {
list_del(&reg->list);
kfree(reg);
}
wdev->mgmt_registrations_need_update = 1;
spin_unlock_bh(&wdev->mgmt_registrations_lock);
spin_unlock_bh(&rdev->mgmt_registrations_lock);

cfg80211_mgmt_registrations_update(wdev);
}

@ -784,7 +786,7 @@ bool cfg80211_rx_mgmt_khz(struct wireless_dev *wdev, int freq, int sig_dbm,
data = buf + ieee80211_hdrlen(mgmt->frame_control);
data_len = len - ieee80211_hdrlen(mgmt->frame_control);

spin_lock_bh(&wdev->mgmt_registrations_lock);
spin_lock_bh(&rdev->mgmt_registrations_lock);

list_for_each_entry(reg, &wdev->mgmt_registrations, list) {
if (reg->frame_type != ftype)

@ -808,7 +810,7 @@ bool cfg80211_rx_mgmt_khz(struct wireless_dev *wdev, int freq, int sig_dbm,
break;
}

spin_unlock_bh(&wdev->mgmt_registrations_lock);
spin_unlock_bh(&rdev->mgmt_registrations_lock);

trace_cfg80211_return_bool(result);
return result;
@ -418,14 +418,17 @@ cfg80211_add_nontrans_list(struct cfg80211_bss *trans_bss,
}
ssid_len = ssid[1];
ssid = ssid + 2;
rcu_read_unlock();

/* check if nontrans_bss is in the list */
list_for_each_entry(bss, &trans_bss->nontrans_list, nontrans_list) {
if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len))
if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len)) {
rcu_read_unlock();
return 0;
}
}

rcu_read_unlock();

/* add to the list */
list_add_tail(&nontrans_bss->nontrans_list, &trans_bss->nontrans_list);
return 0;

@ -1028,14 +1028,14 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
!(rdev->wiphy.interface_modes & (1 << ntype)))
return -EOPNOTSUPP;

/* if it's part of a bridge, reject changing type to station/ibss */
if (netif_is_bridge_port(dev) &&
(ntype == NL80211_IFTYPE_ADHOC ||
ntype == NL80211_IFTYPE_STATION ||
ntype == NL80211_IFTYPE_P2P_CLIENT))
return -EBUSY;

if (ntype != otype) {
/* if it's part of a bridge, reject changing type to station/ibss */
if (netif_is_bridge_port(dev) &&
(ntype == NL80211_IFTYPE_ADHOC ||
ntype == NL80211_IFTYPE_STATION ||
ntype == NL80211_IFTYPE_P2P_CLIENT))
return -EBUSY;

dev->ieee80211_ptr->use_4addr = false;
dev->ieee80211_ptr->mesh_id_up_len = 0;
wdev_lock(dev->ieee80211_ptr);
@ -918,6 +918,13 @@ void key_change_session_keyring(struct callback_head *twork)
return;
}

/* If get_ucounts fails more bits are needed in the refcount */
if (unlikely(!get_ucounts(old->ucounts))) {
WARN_ONCE(1, "In %s get_ucounts failed\n", __func__);
put_cred(new);
return;
}

new-> uid = old-> uid;
new-> euid = old-> euid;
new-> suid = old-> suid;

@ -927,6 +934,7 @@ void key_change_session_keyring(struct callback_head *twork)
new-> sgid = old-> sgid;
new->fsgid = old->fsgid;
new->user = get_uid(old->user);
new->ucounts = old->ucounts;
new->user_ns = get_user_ns(old->user_ns);
new->group_info = get_group_info(old->group_info);
|
||||
SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
|
||||
SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
|
||||
SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
|
||||
SND_PCI_QUIRK(0x1558, 0x65f1, "Clevo PC50HS", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
|
||||
SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
|
||||
SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
|
||||
SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
|
||||
@ -6405,6 +6406,44 @@ static void alc_fixup_no_int_mic(struct hda_codec *codec,
|
||||
}
|
||||
}
|
||||
|
||||
/* GPIO1 = amplifier on/off
|
||||
* GPIO3 = mic mute LED
|
||||
*/
|
||||
static void alc285_fixup_hp_spectre_x360_eb1(struct hda_codec *codec,
|
||||
const struct hda_fixup *fix, int action)
|
||||
{
|
||||
static const hda_nid_t conn[] = { 0x02 };
|
||||
|
||||
struct alc_spec *spec = codec->spec;
|
||||
static const struct hda_pintbl pincfgs[] = {
|
||||
{ 0x14, 0x90170110 }, /* front/high speakers */
|
||||
{ 0x17, 0x90170130 }, /* back/bass speakers */
|
||||
{ }
|
||||
};
|
||||
|
||||
//enable micmute led
|
||||
alc_fixup_hp_gpio_led(codec, action, 0x00, 0x04);
|
||||
|
||||
switch (action) {
|
||||
case HDA_FIXUP_ACT_PRE_PROBE:
|
||||
spec->micmute_led_polarity = 1;
|
||||
/* needed for amp of back speakers */
|
||||
spec->gpio_mask |= 0x01;
|
||||
spec->gpio_dir |= 0x01;
|
||||
snd_hda_apply_pincfgs(codec, pincfgs);
|
||||
/* share DAC to have unified volume control */
|
||||
snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn), conn);
|
||||
snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
|
||||
break;
|
||||
case HDA_FIXUP_ACT_INIT:
|
||||
/* need to toggle GPIO to enable the amp of back speakers */
|
||||
alc_update_gpio_data(codec, 0x01, true);
|
||||
msleep(100);
|
||||
alc_update_gpio_data(codec, 0x01, false);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void alc285_fixup_hp_spectre_x360(struct hda_codec *codec,
|
||||
const struct hda_fixup *fix, int action)
|
||||
{
|
||||
@ -6557,6 +6596,7 @@ enum {
|
||||
ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED,
|
||||
ALC280_FIXUP_HP_9480M,
|
||||
ALC245_FIXUP_HP_X360_AMP,
|
||||
ALC285_FIXUP_HP_SPECTRE_X360_EB1,
|
||||
ALC288_FIXUP_DELL_HEADSET_MODE,
|
||||
ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
|
||||
ALC288_FIXUP_DELL_XPS_13,
|
||||
@ -8250,6 +8290,10 @@ static const struct hda_fixup alc269_fixups[] = {
|
||||
.type = HDA_FIXUP_FUNC,
|
||||
.v.func = alc285_fixup_hp_spectre_x360,
|
||||
},
|
||||
[ALC285_FIXUP_HP_SPECTRE_X360_EB1] = {
|
||||
.type = HDA_FIXUP_FUNC,
|
||||
.v.func = alc285_fixup_hp_spectre_x360_eb1
|
||||
},
|
||||
[ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP] = {
|
||||
.type = HDA_FIXUP_FUNC,
|
||||
.v.func = alc285_fixup_ideapad_s740_coef,
|
||||
@ -8584,6 +8628,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
|
||||
SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
|
||||
SND_PCI_QUIRK(0x103c, 0x8805, "HP ProBook 650 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
|
||||
SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
|
||||
SND_PCI_QUIRK(0x103c, 0x8811, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
|
||||
SND_PCI_QUIRK(0x103c, 0x8812, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
|
||||
SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
|
||||
SND_PCI_QUIRK(0x103c, 0x8847, "HP EliteBook x360 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
|
||||
SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
|
||||
@ -9005,6 +9051,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
|
||||
{.id = ALC245_FIXUP_HP_X360_AMP, .name = "alc245-hp-x360-amp"},
|
||||
{.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"},
|
||||
{.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"},
|
||||
{.id = ALC285_FIXUP_HP_SPECTRE_X360_EB1, .name = "alc285-hp-spectre-x360-eb1"},
|
||||
{.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"},
|
||||
{.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
|
||||
{.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
|
||||
|
@ -1601,6 +1601,7 @@ config SND_SOC_WCD938X_SDW
tristate "WCD9380/WCD9385 Codec - SDW"
select SND_SOC_WCD938X
select SND_SOC_WCD_MBHC
select REGMAP_IRQ
depends on SOUNDWIRE
select REGMAP_SOUNDWIRE
help
@ -922,7 +922,6 @@ static int cs42l42_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
struct snd_soc_component *component = dai->component;
struct cs42l42_private *cs42l42 = snd_soc_component_get_drvdata(component);
unsigned int regval;
u8 fullScaleVol;
int ret;

if (mute) {

@ -993,20 +992,11 @@ static int cs42l42_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
cs42l42->stream_use |= 1 << stream;

if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
/* Read the headphone load */
regval = snd_soc_component_read(component, CS42L42_LOAD_DET_RCSTAT);
if (((regval & CS42L42_RLA_STAT_MASK) >> CS42L42_RLA_STAT_SHIFT) ==
CS42L42_RLA_STAT_15_OHM) {
fullScaleVol = CS42L42_HP_FULL_SCALE_VOL_MASK;
} else {
fullScaleVol = 0;
}

/* Un-mute the headphone, set the full scale volume flag */
/* Un-mute the headphone */
snd_soc_component_update_bits(component, CS42L42_HP_CTL,
CS42L42_HP_ANA_AMUTE_MASK |
CS42L42_HP_ANA_BMUTE_MASK |
CS42L42_HP_FULL_SCALE_VOL_MASK, fullScaleVol);
CS42L42_HP_ANA_BMUTE_MASK,
0);
}
}

@ -305,12 +305,19 @@ static int cs4341_spi_probe(struct spi_device *spi)
return cs4341_probe(&spi->dev);
}

static const struct spi_device_id cs4341_spi_ids[] = {
{ "cs4341a" },
{ }
};
MODULE_DEVICE_TABLE(spi, cs4341_spi_ids);

static struct spi_driver cs4341_spi_driver = {
.driver = {
.name = "cs4341-spi",
.of_match_table = of_match_ptr(cs4341_dt_ids),
},
.probe = cs4341_spi_probe,
.id_table = cs4341_spi_ids,
};
#endif
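[Editor's note] The cs4341 hunk above adds a struct spi_device_id table and hooks it up via MODULE_DEVICE_TABLE() and .id_table, so the module can autoload from the SPI modalias even when there is no OF match. A hedged, generic sketch of that pattern follows; the driver and device names here are placeholders, not from this tree:

#include <linux/module.h>
#include <linux/spi/spi.h>

/* placeholder probe; a real driver would set up its regmap etc. here */
static int example_spi_probe(struct spi_device *spi)
{
	return 0;
}

/* names matched against the SPI modalias reported by the board/firmware */
static const struct spi_device_id example_spi_ids[] = {
	{ "example-codec" },
	{ }
};
MODULE_DEVICE_TABLE(spi, example_spi_ids);

static struct spi_driver example_spi_driver = {
	.driver = {
		.name = "example-spi",
	},
	.probe = example_spi_probe,
	.id_table = example_spi_ids,
};
module_spi_driver(example_spi_driver);

MODULE_LICENSE("GPL");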
@ -867,8 +867,8 @@ static void nau8824_jdet_work(struct work_struct *work)
struct regmap *regmap = nau8824->regmap;
int adc_value, event = 0, event_mask = 0;

snd_soc_dapm_enable_pin(dapm, "MICBIAS");
snd_soc_dapm_enable_pin(dapm, "SAR");
snd_soc_dapm_force_enable_pin(dapm, "MICBIAS");
snd_soc_dapm_force_enable_pin(dapm, "SAR");
snd_soc_dapm_sync(dapm);

msleep(100);

@ -36,6 +36,7 @@ static const struct of_device_id pcm179x_of_match[] = {
MODULE_DEVICE_TABLE(of, pcm179x_of_match);

static const struct spi_device_id pcm179x_spi_ids[] = {
{ "pcm1792a", 0 },
{ "pcm179x", 0 },
{ },
};

@ -116,6 +116,8 @@ static const struct reg_default pcm512x_reg_defaults[] = {
{ PCM512x_FS_SPEED_MODE, 0x00 },
{ PCM512x_IDAC_1, 0x01 },
{ PCM512x_IDAC_2, 0x00 },
{ PCM512x_I2S_1, 0x02 },
{ PCM512x_I2S_2, 0x00 },
};

static bool pcm512x_readable(struct device *dev, unsigned int reg)
@ -4144,10 +4144,10 @@ static int wcd938x_codec_set_jack(struct snd_soc_component *comp,
{
struct wcd938x_priv *wcd = dev_get_drvdata(comp->dev);

if (!jack)
if (jack)
return wcd_mbhc_start(wcd->wcd_mbhc, &wcd->mbhc_cfg, jack);

wcd_mbhc_stop(wcd->wcd_mbhc);
else
wcd_mbhc_stop(wcd->wcd_mbhc);

return 0;
}
@ -742,9 +742,16 @@ static int wm8960_configure_clocking(struct snd_soc_component *component)
int i, j, k;
int ret;

if (!(iface1 & (1<<6))) {
dev_dbg(component->dev,
"Codec is slave mode, no need to configure clock\n");
/*
* For Slave mode clocking should still be configured,
* so this if statement should be removed, but some platform
* may not work if the sysclk is not configured, to avoid such
* compatible issue, just add '!wm8960->sysclk' condition in
* this if statement.
*/
if (!(iface1 & (1 << 6)) && !wm8960->sysclk) {
dev_warn(component->dev,
"slave mode, but proceeding with no clock configuration\n");
return 0;
}
@ -487,8 +487,9 @@ static int fsl_xcvr_prepare(struct snd_pcm_substream *substream,
return ret;
}

/* clear DPATH RESET */
/* set DPATH RESET */
m_ctl |= FSL_XCVR_EXT_CTRL_DPTH_RESET(tx);
v_ctl |= FSL_XCVR_EXT_CTRL_DPTH_RESET(tx);
ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_CTRL, m_ctl, v_ctl);
if (ret < 0) {
dev_err(dai->dev, "Error while setting EXT_CTRL: %d\n", ret);

@ -590,10 +591,6 @@ static void fsl_xcvr_shutdown(struct snd_pcm_substream *substream,
val |= FSL_XCVR_EXT_CTRL_CMDC_RESET(tx);
}

/* set DPATH RESET */
mask |= FSL_XCVR_EXT_CTRL_DPTH_RESET(tx);
val |= FSL_XCVR_EXT_CTRL_DPTH_RESET(tx);

ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_CTRL, mask, val);
if (ret < 0) {
dev_err(dai->dev, "Err setting DPATH RESET: %d\n", ret);

@ -643,6 +640,16 @@ static int fsl_xcvr_trigger(struct snd_pcm_substream *substream, int cmd,
dev_err(dai->dev, "Failed to enable DMA: %d\n", ret);
return ret;
}

/* clear DPATH RESET */
ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_CTRL,
FSL_XCVR_EXT_CTRL_DPTH_RESET(tx),
0);
if (ret < 0) {
dev_err(dai->dev, "Failed to clear DPATH RESET: %d\n", ret);
return ret;
}

break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
@ -456,12 +456,12 @@ static const struct dmi_system_id byt_cht_es8316_quirk_table[] = {

static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
static const char * const mic_name[] = { "in1", "in2" };
struct snd_soc_acpi_mach *mach = dev_get_platdata(dev);
struct property_entry props[MAX_NO_PROPS] = {};
struct byt_cht_es8316_private *priv;
const struct dmi_system_id *dmi_id;
struct device *dev = &pdev->dev;
struct snd_soc_acpi_mach *mach;
struct fwnode_handle *fwnode;
const char *platform_name;
struct acpi_device *adev;

@ -476,7 +476,6 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;

mach = dev->platform_data;
/* fix index of codec dai */
for (i = 0; i < ARRAY_SIZE(byt_cht_es8316_dais); i++) {
if (!strcmp(byt_cht_es8316_dais[i].codecs->name,

@ -494,7 +493,7 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
put_device(&adev->dev);
byt_cht_es8316_dais[dai_index].codecs->name = codec_name;
} else {
dev_err(&pdev->dev, "Error cannot find '%s' dev\n", mach->id);
dev_err(dev, "Error cannot find '%s' dev\n", mach->id);
return -ENXIO;
}

@ -533,11 +532,8 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)

/* get the clock */
priv->mclk = devm_clk_get(dev, "pmc_plt_clk_3");
if (IS_ERR(priv->mclk)) {
ret = PTR_ERR(priv->mclk);
dev_err(dev, "clk_get pmc_plt_clk_3 failed: %d\n", ret);
return ret;
}
if (IS_ERR(priv->mclk))
return dev_err_probe(dev, PTR_ERR(priv->mclk), "clk_get pmc_plt_clk_3 failed\n");

/* get speaker enable GPIO */
codec_dev = acpi_get_first_physical_node(adev);

@ -567,22 +563,13 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)

devm_acpi_dev_add_driver_gpios(codec_dev, byt_cht_es8316_gpios);
priv->speaker_en_gpio =
gpiod_get_index(codec_dev, "speaker-enable", 0,
/* see comment in byt_cht_es8316_resume */
GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);

gpiod_get_optional(codec_dev, "speaker-enable",
/* see comment in byt_cht_es8316_resume() */
GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
if (IS_ERR(priv->speaker_en_gpio)) {
ret = PTR_ERR(priv->speaker_en_gpio);
switch (ret) {
case -ENOENT:
priv->speaker_en_gpio = NULL;
break;
default:
dev_err(dev, "get speaker GPIO failed: %d\n", ret);
fallthrough;
case -EPROBE_DEFER:
goto err_put_codec;
}
ret = dev_err_probe(dev, PTR_ERR(priv->speaker_en_gpio),
"get speaker GPIO failed\n");
goto err_put_codec;
}

snprintf(components_string, sizeof(components_string),

@ -597,7 +584,7 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
byt_cht_es8316_card.long_name = long_name;
#endif

sof_parent = snd_soc_acpi_sof_parent(&pdev->dev);
sof_parent = snd_soc_acpi_sof_parent(dev);

/* set card and driver name */
if (sof_parent) {
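[Editor's note] The byt_cht_es8316 hunks above collapse the clock and speaker-GPIO error handling into dev_err_probe(), which suppresses the log for -EPROBE_DEFER and returns the error code, and switch to the optional GPIO getter so a missing GPIO simply yields NULL. A hedged sketch of that probe-error idiom, using placeholder consumer names and the devm_ GPIO variant for brevity:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* returns 0 on success or a negative errno suitable for probe() */
static int example_get_resources(struct device *dev,
				 struct clk **clk,
				 struct gpio_desc **en_gpio)
{
	*clk = devm_clk_get(dev, "mclk");
	if (IS_ERR(*clk))
		/* logs unless the error is -EPROBE_DEFER, then returns it */
		return dev_err_probe(dev, PTR_ERR(*clk), "failed to get mclk\n");

	/* optional: a missing "enable" GPIO gives NULL instead of -ENOENT */
	*en_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(*en_gpio))
		return dev_err_probe(dev, PTR_ERR(*en_gpio),
				     "failed to get enable GPIO\n");

	return 0;
}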
@ -2620,6 +2620,7 @@ int snd_soc_component_initialize(struct snd_soc_component *component,
INIT_LIST_HEAD(&component->dai_list);
INIT_LIST_HEAD(&component->dobj_list);
INIT_LIST_HEAD(&component->card_list);
INIT_LIST_HEAD(&component->list);
mutex_init(&component->io_mutex);

component->name = fmt_single_name(dev, &component->id);
@ -2561,6 +2561,7 @@ static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
const char *pin, int status)
{
struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true);
int ret = 0;

dapm_assert_locked(dapm);

@ -2573,13 +2574,14 @@ static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
dapm_mark_dirty(w, "pin configuration");
dapm_widget_invalidate_input_paths(w);
dapm_widget_invalidate_output_paths(w);
ret = 1;
}

w->connected = status;
if (status == 0)
w->force = 0;

return 0;
return ret;
}

/**

@ -3583,14 +3585,15 @@ int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol,
{
struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
const char *pin = (const char *)kcontrol->private_value;
int ret;

if (ucontrol->value.integer.value[0])
snd_soc_dapm_enable_pin(&card->dapm, pin);
ret = snd_soc_dapm_enable_pin(&card->dapm, pin);
else
snd_soc_dapm_disable_pin(&card->dapm, pin);
ret = snd_soc_dapm_disable_pin(&card->dapm, pin);

snd_soc_dapm_sync(&card->dapm);
return 0;
return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_put_pin_switch);

@ -4023,7 +4026,7 @@ static int snd_soc_dapm_dai_link_put(struct snd_kcontrol *kcontrol,

rtd->params_select = ucontrol->value.enumerated.item[0];

return 0;
return 1;
}

static void
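[Editor's note] The soc-dapm hunks above make the pin helpers report whether anything actually changed (return 1 on change, 0 otherwise) and propagate that from the put() handlers, which is the contract ALSA uses to decide whether to send a control-change notification. A minimal user-space-style sketch of that "return 1 only on change" contract, with simplified types rather than the kernel structs:

#include <stdbool.h>
#include <stdio.h>

struct pin { bool connected; };

/* returns 1 if the value changed, 0 if it was already in that state */
static int pin_set(struct pin *p, bool status)
{
	if (p->connected == status)
		return 0;
	p->connected = status;
	return 1;
}

/* a put() handler propagates that change flag back to the caller */
static int pin_switch_put(struct pin *p, long value)
{
	return pin_set(p, value != 0);
}

int main(void)
{
	struct pin p = { .connected = false };
	printf("%d\n", pin_switch_put(&p, 1)); /* 1: state changed */
	printf("%d\n", pin_switch_put(&p, 1)); /* 0: no change */
	return 0;
}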
@ -1198,6 +1198,13 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
cval->res = 1;
}
break;
case USB_ID(0x1224, 0x2a25): /* Jieli Technology USB PHY 2.0 */
if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
usb_audio_info(chip,
"set resolution quirk: cval->res = 16\n");
cval->res = 16;
}
break;
}
}
@ -4012,6 +4012,38 @@ YAMAHA_DEVICE(0x7010, "UB99"),
}
}
},
{
/*
* Sennheiser GSP670
* Change order of interfaces loaded
*/
USB_DEVICE(0x1395, 0x0300),
.bInterfaceClass = USB_CLASS_PER_INTERFACE,
.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
.ifnum = QUIRK_ANY_INTERFACE,
.type = QUIRK_COMPOSITE,
.data = &(const struct snd_usb_audio_quirk[]) {
// Communication
{
.ifnum = 3,
.type = QUIRK_AUDIO_STANDARD_INTERFACE
},
// Recording
{
.ifnum = 4,
.type = QUIRK_AUDIO_STANDARD_INTERFACE
},
// Main
{
.ifnum = 1,
.type = QUIRK_AUDIO_STANDARD_INTERFACE
},
{
.ifnum = -1
}
}
}
},

#undef USB_DEVICE_VENDOR_SPEC
#undef USB_AUDIO_DEVICE
@ -1719,6 +1719,11 @@ void snd_usb_audioformat_attributes_quirk(struct snd_usb_audio *chip,
*/
fp->attributes &= ~UAC_EP_CS_ATTR_FILL_MAX;
break;
case USB_ID(0x1224, 0x2a25): /* Jieli Technology USB PHY 2.0 */
/* mic works only when ep packet size is set to wMaxPacketSize */
fp->attributes |= UAC_EP_CS_ATTR_FILL_MAX;
break;

}
}
@ -1884,10 +1889,14 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
QUIRK_FLAG_GET_SAMPLE_RATE),
DEVICE_FLG(0x2912, 0x30c8, /* Audioengine D1 */
QUIRK_FLAG_GET_SAMPLE_RATE),
DEVICE_FLG(0x30be, 0x0101, /* Schiit Hel */
QUIRK_FLAG_IGNORE_CTL_ERROR),
DEVICE_FLG(0x413c, 0xa506, /* Dell AE515 sound bar */
QUIRK_FLAG_GET_SAMPLE_RATE),
DEVICE_FLG(0x534d, 0x2109, /* MacroSilicon MS2109 */
QUIRK_FLAG_ALIGN_TRANSFER),
DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */
QUIRK_FLAG_GET_SAMPLE_RATE),
DEVICE_FLG(0x09da, 0x2695, /* A4Tech FHD 1080p webcam */
QUIRK_FLAG_DISABLE_AUTOSUSPEND | QUIRK_FLAG_GET_SAMPLE_RATE),
@ -787,6 +787,8 @@ $(OUTPUT)dlfilters/%.o: dlfilters/%.c include/perf/perf_dlfilter.h
$(Q)$(MKDIR) -p $(OUTPUT)dlfilters
$(QUIET_CC)$(CC) -c -Iinclude $(EXTRA_CFLAGS) -o $@ -fpic $<

.SECONDARY: $(DLFILTERS:.so=.o)

$(OUTPUT)dlfilters/%.so: $(OUTPUT)dlfilters/%.o
$(QUIET_LINK)$(CC) $(EXTRA_CFLAGS) -shared -o $@ $<
Some files were not shown because too many files have changed in this diff