Mirror of https://github.com/Qortal/Brooklyn.git, synced 2025-01-30 14:52:17 +00:00.
Baboons are after me!
* Save me from them gay baboons ! * Save me !!!

commit d01923b4c1 (parent c07e86d045)
@@ -397,6 +397,12 @@ static int __init bootconfig_params(char *param, char *val,
return 0;
}

static int __init warn_bootconfig(char *str)
{
/* The 'bootconfig' has been handled by bootconfig_params(). */
return 0;
}

static void __init setup_boot_config(void)
{
static char tmp_cmdline[COMMAND_LINE_SIZE] __initdata;
@@ -475,9 +481,8 @@ static int __init warn_bootconfig(char *str)
pr_warn("WARNING: 'bootconfig' found on the kernel command line but CONFIG_BOOT_CONFIG is not set.\n");
return 0;
}
early_param("bootconfig", warn_bootconfig);

#endif
early_param("bootconfig", warn_bootconfig);

/* Change NUL term back to "=", to make "param" the whole string. */
static void __init repair_env_string(char *param, char *val)

@@ -353,9 +353,15 @@ const struct bpf_func_proto bpf_jiffies64_proto = {
#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
struct cgroup *cgrp = task_dfl_cgroup(current);
struct cgroup *cgrp;
u64 cgrp_id;

return cgroup_id(cgrp);
rcu_read_lock();
cgrp = task_dfl_cgroup(current);
cgrp_id = cgroup_id(cgrp);
rcu_read_unlock();

return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
@@ -366,13 +372,17 @@ const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
struct cgroup *cgrp = task_dfl_cgroup(current);
struct cgroup *cgrp;
struct cgroup *ancestor;
u64 cgrp_id;

rcu_read_lock();
cgrp = task_dfl_cgroup(current);
ancestor = cgroup_ancestor(cgrp, ancestor_level);
if (!ancestor)
return 0;
return cgroup_id(ancestor);
cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
rcu_read_unlock();

return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {

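For context, here is a minimal userspace sketch of the pattern the hunk above moves to: look the object up under a read-side lock, copy the value out while still protected, drop the lock, and return the copy rather than an expression that touches the shared object after unlock. This is only an analogue with hypothetical names (pthread rwlock instead of RCU), not kernel code.

```c
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct group { uint64_t id; };

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static struct group *current_group;     /* shared; writers may swap it */

static uint64_t get_current_group_id(void)
{
	struct group *grp;
	uint64_t id;

	pthread_rwlock_rdlock(&lock);
	grp = current_group;             /* lookup under the read lock */
	id = grp ? grp->id : 0;          /* copy the value while protected */
	pthread_rwlock_unlock(&lock);

	return id;                       /* return the copy, not grp->id */
}

int main(void)
{
	static struct group g = { .id = 42 };

	current_group = &g;
	printf("%llu\n", (unsigned long long)get_current_group_id());
	return 0;
}
```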
@ -5150,8 +5150,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
|
||||
case BPF_MAP_TYPE_RINGBUF:
|
||||
if (func_id != BPF_FUNC_ringbuf_output &&
|
||||
func_id != BPF_FUNC_ringbuf_reserve &&
|
||||
func_id != BPF_FUNC_ringbuf_submit &&
|
||||
func_id != BPF_FUNC_ringbuf_discard &&
|
||||
func_id != BPF_FUNC_ringbuf_query)
|
||||
goto error;
|
||||
break;
|
||||
@ -5260,6 +5258,12 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
|
||||
if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
|
||||
goto error;
|
||||
break;
|
||||
case BPF_FUNC_ringbuf_output:
|
||||
case BPF_FUNC_ringbuf_reserve:
|
||||
case BPF_FUNC_ringbuf_query:
|
||||
if (map->map_type != BPF_MAP_TYPE_RINGBUF)
|
||||
goto error;
|
||||
break;
|
||||
case BPF_FUNC_get_stackid:
|
||||
if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
|
||||
goto error;
|
||||
@ -11410,10 +11414,11 @@ static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
|
||||
* insni[off, off + cnt). Adjust corresponding insn_aux_data by copying
|
||||
* [0, off) and [off, end) to new locations, so the patched range stays zero
|
||||
*/
|
||||
static int adjust_insn_aux_data(struct bpf_verifier_env *env,
|
||||
struct bpf_prog *new_prog, u32 off, u32 cnt)
|
||||
static void adjust_insn_aux_data(struct bpf_verifier_env *env,
|
||||
struct bpf_insn_aux_data *new_data,
|
||||
struct bpf_prog *new_prog, u32 off, u32 cnt)
|
||||
{
|
||||
struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
|
||||
struct bpf_insn_aux_data *old_data = env->insn_aux_data;
|
||||
struct bpf_insn *insn = new_prog->insnsi;
|
||||
u32 old_seen = old_data[off].seen;
|
||||
u32 prog_len;
|
||||
@ -11426,12 +11431,9 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
|
||||
old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
|
||||
|
||||
if (cnt == 1)
|
||||
return 0;
|
||||
return;
|
||||
prog_len = new_prog->len;
|
||||
new_data = vzalloc(array_size(prog_len,
|
||||
sizeof(struct bpf_insn_aux_data)));
|
||||
if (!new_data)
|
||||
return -ENOMEM;
|
||||
|
||||
memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
|
||||
memcpy(new_data + off + cnt - 1, old_data + off,
|
||||
sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
|
||||
@ -11442,7 +11444,6 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
|
||||
}
|
||||
env->insn_aux_data = new_data;
|
||||
vfree(old_data);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
|
||||
@ -11477,6 +11478,14 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
|
||||
const struct bpf_insn *patch, u32 len)
|
||||
{
|
||||
struct bpf_prog *new_prog;
|
||||
struct bpf_insn_aux_data *new_data = NULL;
|
||||
|
||||
if (len > 1) {
|
||||
new_data = vzalloc(array_size(env->prog->len + len - 1,
|
||||
sizeof(struct bpf_insn_aux_data)));
|
||||
if (!new_data)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
|
||||
if (IS_ERR(new_prog)) {
|
||||
@ -11484,10 +11493,10 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
|
||||
verbose(env,
|
||||
"insn %d cannot be patched due to 16-bit range\n",
|
||||
env->insn_aux_data[off].orig_idx);
|
||||
vfree(new_data);
|
||||
return NULL;
|
||||
}
|
||||
if (adjust_insn_aux_data(env, new_prog, off, len))
|
||||
return NULL;
|
||||
adjust_insn_aux_data(env, new_data, new_prog, off, len);
|
||||
adjust_subprog_starts(env, off, len);
|
||||
adjust_poke_descs(new_prog, off, len);
|
||||
return new_prog;
|
||||
@ -11663,6 +11672,7 @@ static void sanitize_dead_code(struct bpf_verifier_env *env)
|
||||
if (aux_data[i].seen)
|
||||
continue;
|
||||
memcpy(insn + i, &trap, sizeof(trap));
|
||||
aux_data[i].zext_dst = false;
|
||||
}
|
||||
}
|
||||
|
||||
@ -12003,6 +12013,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
|
||||
if (is_narrower_load && size < target_size) {
|
||||
u8 shift = bpf_ctx_narrow_access_offset(
|
||||
off, size, size_default) * 8;
|
||||
if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) {
|
||||
verbose(env, "bpf verifier narrow ctx load misconfigured\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (ctx_field_size <= 4) {
|
||||
if (shift)
|
||||
insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
|
||||
|
@@ -248,9 +248,9 @@ static inline cfi_check_fn find_shadow_check_fn(unsigned long ptr)
{
cfi_check_fn fn;

rcu_read_lock_sched();
rcu_read_lock_sched_notrace();
fn = ptr_to_check_fn(rcu_dereference_sched(cfi_shadow), ptr);
rcu_read_unlock_sched();
rcu_read_unlock_sched_notrace();

return fn;
}
@@ -269,11 +269,11 @@ static inline cfi_check_fn find_module_check_fn(unsigned long ptr)
cfi_check_fn fn = NULL;
struct module *mod;

rcu_read_lock_sched();
rcu_read_lock_sched_notrace();
mod = __module_address(ptr);
if (mod)
fn = mod->cfi_check;
rcu_read_unlock_sched();
rcu_read_unlock_sched_notrace();

return fn;
}

@ -1114,7 +1114,7 @@ enum subparts_cmd {
|
||||
* cpus_allowed can be granted or an error code will be returned.
|
||||
*
|
||||
* For partcmd_disable, the cpuset is being transofrmed from a partition
|
||||
* root back to a non-partition root. any CPUs in cpus_allowed that are in
|
||||
* root back to a non-partition root. Any CPUs in cpus_allowed that are in
|
||||
* parent's subparts_cpus will be taken away from that cpumask and put back
|
||||
* into parent's effective_cpus. 0 should always be returned.
|
||||
*
|
||||
@ -1148,6 +1148,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
|
||||
struct cpuset *parent = parent_cs(cpuset);
|
||||
int adding; /* Moving cpus from effective_cpus to subparts_cpus */
|
||||
int deleting; /* Moving cpus from subparts_cpus to effective_cpus */
|
||||
int new_prs;
|
||||
bool part_error = false; /* Partition error? */
|
||||
|
||||
percpu_rwsem_assert_held(&cpuset_rwsem);
|
||||
@ -1183,6 +1184,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
|
||||
* A cpumask update cannot make parent's effective_cpus become empty.
|
||||
*/
|
||||
adding = deleting = false;
|
||||
new_prs = cpuset->partition_root_state;
|
||||
if (cmd == partcmd_enable) {
|
||||
cpumask_copy(tmp->addmask, cpuset->cpus_allowed);
|
||||
adding = true;
|
||||
@ -1225,7 +1227,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
|
||||
/*
|
||||
* partcmd_update w/o newmask:
|
||||
*
|
||||
* addmask = cpus_allowed & parent->effectiveb_cpus
|
||||
* addmask = cpus_allowed & parent->effective_cpus
|
||||
*
|
||||
* Note that parent's subparts_cpus may have been
|
||||
* pre-shrunk in case there is a change in the cpu list.
|
||||
@ -1247,11 +1249,11 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
|
||||
switch (cpuset->partition_root_state) {
|
||||
case PRS_ENABLED:
|
||||
if (part_error)
|
||||
cpuset->partition_root_state = PRS_ERROR;
|
||||
new_prs = PRS_ERROR;
|
||||
break;
|
||||
case PRS_ERROR:
|
||||
if (!part_error)
|
||||
cpuset->partition_root_state = PRS_ENABLED;
|
||||
new_prs = PRS_ENABLED;
|
||||
break;
|
||||
}
|
||||
/*
|
||||
@ -1260,10 +1262,10 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
|
||||
part_error = (prev_prs == PRS_ERROR);
|
||||
}
|
||||
|
||||
if (!part_error && (cpuset->partition_root_state == PRS_ERROR))
|
||||
if (!part_error && (new_prs == PRS_ERROR))
|
||||
return 0; /* Nothing need to be done */
|
||||
|
||||
if (cpuset->partition_root_state == PRS_ERROR) {
|
||||
if (new_prs == PRS_ERROR) {
|
||||
/*
|
||||
* Remove all its cpus from parent's subparts_cpus.
|
||||
*/
|
||||
@ -1272,7 +1274,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
|
||||
parent->subparts_cpus);
|
||||
}
|
||||
|
||||
if (!adding && !deleting)
|
||||
if (!adding && !deleting && (new_prs == cpuset->partition_root_state))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
@ -1299,6 +1301,9 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
|
||||
}
|
||||
|
||||
parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus);
|
||||
|
||||
if (cpuset->partition_root_state != new_prs)
|
||||
cpuset->partition_root_state = new_prs;
|
||||
spin_unlock_irq(&callback_lock);
|
||||
|
||||
return cmd == partcmd_update;
|
||||
@ -1321,6 +1326,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
|
||||
struct cpuset *cp;
|
||||
struct cgroup_subsys_state *pos_css;
|
||||
bool need_rebuild_sched_domains = false;
|
||||
int new_prs;
|
||||
|
||||
rcu_read_lock();
|
||||
cpuset_for_each_descendant_pre(cp, pos_css, cs) {
|
||||
@ -1360,17 +1366,18 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
|
||||
* update_tasks_cpumask() again for tasks in the parent
|
||||
* cpuset if the parent's subparts_cpus changes.
|
||||
*/
|
||||
if ((cp != cs) && cp->partition_root_state) {
|
||||
new_prs = cp->partition_root_state;
|
||||
if ((cp != cs) && new_prs) {
|
||||
switch (parent->partition_root_state) {
|
||||
case PRS_DISABLED:
|
||||
/*
|
||||
* If parent is not a partition root or an
|
||||
* invalid partition root, clear the state
|
||||
* state and the CS_CPU_EXCLUSIVE flag.
|
||||
* invalid partition root, clear its state
|
||||
* and its CS_CPU_EXCLUSIVE flag.
|
||||
*/
|
||||
WARN_ON_ONCE(cp->partition_root_state
|
||||
!= PRS_ERROR);
|
||||
cp->partition_root_state = 0;
|
||||
new_prs = PRS_DISABLED;
|
||||
|
||||
/*
|
||||
* clear_bit() is an atomic operation and
|
||||
@ -1391,11 +1398,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
|
||||
/*
|
||||
* When parent is invalid, it has to be too.
|
||||
*/
|
||||
cp->partition_root_state = PRS_ERROR;
|
||||
if (cp->nr_subparts_cpus) {
|
||||
cp->nr_subparts_cpus = 0;
|
||||
cpumask_clear(cp->subparts_cpus);
|
||||
}
|
||||
new_prs = PRS_ERROR;
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -1407,8 +1410,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
|
||||
spin_lock_irq(&callback_lock);
|
||||
|
||||
cpumask_copy(cp->effective_cpus, tmp->new_cpus);
|
||||
if (cp->nr_subparts_cpus &&
|
||||
(cp->partition_root_state != PRS_ENABLED)) {
|
||||
if (cp->nr_subparts_cpus && (new_prs != PRS_ENABLED)) {
|
||||
cp->nr_subparts_cpus = 0;
|
||||
cpumask_clear(cp->subparts_cpus);
|
||||
} else if (cp->nr_subparts_cpus) {
|
||||
@ -1435,6 +1437,10 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
|
||||
= cpumask_weight(cp->subparts_cpus);
|
||||
}
|
||||
}
|
||||
|
||||
if (new_prs != cp->partition_root_state)
|
||||
cp->partition_root_state = new_prs;
|
||||
|
||||
spin_unlock_irq(&callback_lock);
|
||||
|
||||
WARN_ON(!is_in_v2_mode() &&
|
||||
@ -1937,34 +1943,32 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
|
||||
|
||||
/*
|
||||
* update_prstate - update partititon_root_state
|
||||
* cs: the cpuset to update
|
||||
* val: 0 - disabled, 1 - enabled
|
||||
* cs: the cpuset to update
|
||||
* new_prs: new partition root state
|
||||
*
|
||||
* Call with cpuset_mutex held.
|
||||
*/
|
||||
static int update_prstate(struct cpuset *cs, int val)
|
||||
static int update_prstate(struct cpuset *cs, int new_prs)
|
||||
{
|
||||
int err;
|
||||
int err, old_prs = cs->partition_root_state;
|
||||
struct cpuset *parent = parent_cs(cs);
|
||||
struct tmpmasks tmp;
|
||||
struct tmpmasks tmpmask;
|
||||
|
||||
if ((val != 0) && (val != 1))
|
||||
return -EINVAL;
|
||||
if (val == cs->partition_root_state)
|
||||
if (old_prs == new_prs)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Cannot force a partial or invalid partition root to a full
|
||||
* partition root.
|
||||
*/
|
||||
if (val && cs->partition_root_state)
|
||||
if (new_prs && (old_prs == PRS_ERROR))
|
||||
return -EINVAL;
|
||||
|
||||
if (alloc_cpumasks(NULL, &tmp))
|
||||
if (alloc_cpumasks(NULL, &tmpmask))
|
||||
return -ENOMEM;
|
||||
|
||||
err = -EINVAL;
|
||||
if (!cs->partition_root_state) {
|
||||
if (!old_prs) {
|
||||
/*
|
||||
* Turning on partition root requires setting the
|
||||
* CS_CPU_EXCLUSIVE bit implicitly as well and cpus_allowed
|
||||
@ -1978,31 +1982,27 @@ static int update_prstate(struct cpuset *cs, int val)
|
||||
goto out;
|
||||
|
||||
err = update_parent_subparts_cpumask(cs, partcmd_enable,
|
||||
NULL, &tmp);
|
||||
NULL, &tmpmask);
|
||||
if (err) {
|
||||
update_flag(CS_CPU_EXCLUSIVE, cs, 0);
|
||||
goto out;
|
||||
}
|
||||
cs->partition_root_state = PRS_ENABLED;
|
||||
} else {
|
||||
/*
|
||||
* Turning off partition root will clear the
|
||||
* CS_CPU_EXCLUSIVE bit.
|
||||
*/
|
||||
if (cs->partition_root_state == PRS_ERROR) {
|
||||
cs->partition_root_state = 0;
|
||||
if (old_prs == PRS_ERROR) {
|
||||
update_flag(CS_CPU_EXCLUSIVE, cs, 0);
|
||||
err = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = update_parent_subparts_cpumask(cs, partcmd_disable,
|
||||
NULL, &tmp);
|
||||
NULL, &tmpmask);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
cs->partition_root_state = 0;
|
||||
|
||||
/* Turning off CS_CPU_EXCLUSIVE will not return error */
|
||||
update_flag(CS_CPU_EXCLUSIVE, cs, 0);
|
||||
}
|
||||
@ -2015,11 +2015,17 @@ static int update_prstate(struct cpuset *cs, int val)
|
||||
update_tasks_cpumask(parent);
|
||||
|
||||
if (parent->child_ecpus_count)
|
||||
update_sibling_cpumasks(parent, cs, &tmp);
|
||||
update_sibling_cpumasks(parent, cs, &tmpmask);
|
||||
|
||||
rebuild_sched_domains_locked();
|
||||
out:
|
||||
free_cpumasks(NULL, &tmp);
|
||||
if (!err) {
|
||||
spin_lock_irq(&callback_lock);
|
||||
cs->partition_root_state = new_prs;
|
||||
spin_unlock_irq(&callback_lock);
|
||||
}
|
||||
|
||||
free_cpumasks(NULL, &tmpmask);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -3060,7 +3066,7 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
|
||||
goto retry;
|
||||
}
|
||||
|
||||
parent = parent_cs(cs);
|
||||
parent = parent_cs(cs);
|
||||
compute_effective_cpumask(&new_cpus, cs, parent);
|
||||
nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
|
||||
|
||||
@ -3082,8 +3088,10 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
|
||||
if (is_partition_root(cs) && (cpumask_empty(&new_cpus) ||
|
||||
(parent->partition_root_state == PRS_ERROR))) {
|
||||
if (cs->nr_subparts_cpus) {
|
||||
spin_lock_irq(&callback_lock);
|
||||
cs->nr_subparts_cpus = 0;
|
||||
cpumask_clear(cs->subparts_cpus);
|
||||
spin_unlock_irq(&callback_lock);
|
||||
compute_effective_cpumask(&new_cpus, cs, parent);
|
||||
}
|
||||
|
||||
@ -3097,7 +3105,9 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
|
||||
cpumask_empty(&new_cpus)) {
|
||||
update_parent_subparts_cpumask(cs, partcmd_disable,
|
||||
NULL, tmp);
|
||||
spin_lock_irq(&callback_lock);
|
||||
cs->partition_root_state = PRS_ERROR;
|
||||
spin_unlock_irq(&callback_lock);
|
||||
}
|
||||
cpuset_force_rebuild();
|
||||
}
|
||||
@ -3168,6 +3178,13 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
|
||||
cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
|
||||
mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
|
||||
|
||||
/*
|
||||
* In the rare case that hotplug removes all the cpus in subparts_cpus,
|
||||
* we assumed that cpus are updated.
|
||||
*/
|
||||
if (!cpus_updated && top_cpuset.nr_subparts_cpus)
|
||||
cpus_updated = true;
|
||||
|
||||
/* synchronize cpus_allowed to cpu_active_mask */
|
||||
if (cpus_updated) {
|
||||
spin_lock_irq(&callback_lock);
|
||||
|
@ -13,19 +13,32 @@
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/syscore_ops.h>
|
||||
|
||||
static ATOMIC_NOTIFIER_HEAD(cpu_pm_notifier_chain);
|
||||
/*
|
||||
* atomic_notifiers use a spinlock_t, which can block under PREEMPT_RT.
|
||||
* Notifications for cpu_pm will be issued by the idle task itself, which can
|
||||
* never block, IOW it requires using a raw_spinlock_t.
|
||||
*/
|
||||
static struct {
|
||||
struct raw_notifier_head chain;
|
||||
raw_spinlock_t lock;
|
||||
} cpu_pm_notifier = {
|
||||
.chain = RAW_NOTIFIER_INIT(cpu_pm_notifier.chain),
|
||||
.lock = __RAW_SPIN_LOCK_UNLOCKED(cpu_pm_notifier.lock),
|
||||
};
|
||||
|
||||
static int cpu_pm_notify(enum cpu_pm_event event)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* atomic_notifier_call_chain has a RCU read critical section, which
|
||||
* could be disfunctional in cpu idle. Copy RCU_NONIDLE code to let
|
||||
* RCU know this.
|
||||
* This introduces a RCU read critical section, which could be
|
||||
* disfunctional in cpu idle. Copy RCU_NONIDLE code to let RCU know
|
||||
* this.
|
||||
*/
|
||||
rcu_irq_enter_irqson();
|
||||
ret = atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL);
|
||||
rcu_read_lock();
|
||||
ret = raw_notifier_call_chain(&cpu_pm_notifier.chain, event, NULL);
|
||||
rcu_read_unlock();
|
||||
rcu_irq_exit_irqson();
|
||||
|
||||
return notifier_to_errno(ret);
|
||||
@ -33,10 +46,13 @@ static int cpu_pm_notify(enum cpu_pm_event event)
|
||||
|
||||
static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event event_down)
|
||||
{
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
rcu_irq_enter_irqson();
|
||||
ret = atomic_notifier_call_chain_robust(&cpu_pm_notifier_chain, event_up, event_down, NULL);
|
||||
raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
|
||||
ret = raw_notifier_call_chain_robust(&cpu_pm_notifier.chain, event_up, event_down, NULL);
|
||||
raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
|
||||
rcu_irq_exit_irqson();
|
||||
|
||||
return notifier_to_errno(ret);
|
||||
@ -49,12 +65,17 @@ static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event ev
|
||||
* Add a driver to a list of drivers that are notified about
|
||||
* CPU and CPU cluster low power entry and exit.
|
||||
*
|
||||
* This function may sleep, and has the same return conditions as
|
||||
* raw_notifier_chain_register.
|
||||
* This function has the same return conditions as raw_notifier_chain_register.
|
||||
*/
|
||||
int cpu_pm_register_notifier(struct notifier_block *nb)
|
||||
{
|
||||
return atomic_notifier_chain_register(&cpu_pm_notifier_chain, nb);
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
|
||||
ret = raw_notifier_chain_register(&cpu_pm_notifier.chain, nb);
|
||||
raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
|
||||
|
||||
@ -64,12 +85,17 @@ EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
|
||||
*
|
||||
* Remove a driver from the CPU PM notifier list.
|
||||
*
|
||||
* This function may sleep, and has the same return conditions as
|
||||
* raw_notifier_chain_unregister.
|
||||
* This function has the same return conditions as raw_notifier_chain_unregister.
|
||||
*/
|
||||
int cpu_pm_unregister_notifier(struct notifier_block *nb)
|
||||
{
|
||||
return atomic_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
|
||||
ret = raw_notifier_chain_unregister(&cpu_pm_notifier.chain, nb);
|
||||
raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
|
||||
|
||||
|
@@ -828,10 +828,10 @@ void __init fork_init(void)
for (i = 0; i < MAX_PER_NAMESPACE_UCOUNTS; i++)
init_user_ns.ucount_max[i] = max_threads/2;

set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_NPROC, task_rlimit(&init_task, RLIMIT_NPROC));
set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE, task_rlimit(&init_task, RLIMIT_MSGQUEUE));
set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, task_rlimit(&init_task, RLIMIT_SIGPENDING));
set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK, task_rlimit(&init_task, RLIMIT_MEMLOCK));
set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_NPROC, RLIM_INFINITY);
set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE, RLIM_INFINITY);
set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, RLIM_INFINITY);
set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK, RLIM_INFINITY);

#ifdef CONFIG_VMAP_STACK
cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",

@@ -799,12 +799,14 @@ static int __init irq_timings_test_irqs(struct timings_intervals *ti)

__irq_timings_store(irq, irqs, ti->intervals[i]);
if (irqs->circ_timings[i & IRQ_TIMINGS_MASK] != index) {
ret = -EBADSLT;
pr_err("Failed to store in the circular buffer\n");
goto out;
}
}

if (irqs->count != ti->count) {
ret = -ERANGE;
pr_err("Count differs\n");
goto out;
}

@@ -928,7 +928,6 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
struct mutex_waiter waiter;
bool first = false;
struct ww_mutex *ww;
int ret;

@@ -1007,6 +1006,8 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas

set_current_state(state);
for (;;) {
bool first;

/*
* Once we hold wait_lock, we're serialized against
* mutex_unlock() handing the lock off to us, do a trylock
@@ -1035,15 +1036,9 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
spin_unlock(&lock->wait_lock);
schedule_preempt_disabled();

/*
* ww_mutex needs to always recheck its position since its waiter
* list is not FIFO ordered.
*/
if (ww_ctx || !first) {
first = __mutex_waiter_is_first(lock, &waiter);
if (first)
__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
}
first = __mutex_waiter_is_first(lock, &waiter);
if (first)
__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);

set_current_state(state);
/*

@@ -170,7 +170,9 @@ static int em_create_perf_table(struct device *dev, struct em_perf_domain *pd,
/* Compute the cost of each performance state. */
fmax = (u64) table[nr_states - 1].frequency;
for (i = 0; i < nr_states; i++) {
table[i].cost = div64_u64(fmax * table[i].power,
unsigned long power_res = em_scale_power(table[i].power);

table[i].cost = div64_u64(fmax * power_res,
table[i].frequency);
}

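For reference, the hunk above keeps the same per-state cost formula and only changes which power value is fed into it. In the form the loop computes (a sketch; the exact scaling applied by em_scale_power() is not shown in this diff):

$$\mathrm{cost}_i = \frac{f_{\max} \cdot P_i}{f_i}$$

where $f_{\max}$ is the highest frequency in the table, $f_i$ the frequency of state $i$, and $P_i$ the power of state $i$ (after the change, the scaled value returned by em_scale_power()).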
@ -7,6 +7,8 @@
|
||||
* Author: Paul E. McKenney <paulmck@linux.ibm.com>
|
||||
*/
|
||||
|
||||
#include <linux/kvm_para.h>
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Controlling CPU stall warnings, including delay calculation.
|
||||
@ -267,8 +269,10 @@ static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
|
||||
struct task_struct *ts[8];
|
||||
|
||||
lockdep_assert_irqs_disabled();
|
||||
if (!rcu_preempt_blocked_readers_cgp(rnp))
|
||||
if (!rcu_preempt_blocked_readers_cgp(rnp)) {
|
||||
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
||||
return 0;
|
||||
}
|
||||
pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
|
||||
rnp->level, rnp->grplo, rnp->grphi);
|
||||
t = list_entry(rnp->gp_tasks->prev,
|
||||
@ -280,8 +284,8 @@ static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
|
||||
break;
|
||||
}
|
||||
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
||||
for (i--; i; i--) {
|
||||
t = ts[i];
|
||||
while (i) {
|
||||
t = ts[--i];
|
||||
if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
|
||||
pr_cont(" P%d", t->pid);
|
||||
else
|
||||
@ -696,6 +700,14 @@ static void check_cpu_stall(struct rcu_data *rdp)
|
||||
(READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
|
||||
cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
|
||||
|
||||
/*
|
||||
* If a virtual machine is stopped by the host it can look to
|
||||
* the watchdog like an RCU stall. Check to see if the host
|
||||
* stopped the vm.
|
||||
*/
|
||||
if (kvm_check_and_clear_guest_paused())
|
||||
return;
|
||||
|
||||
/* We haven't checked in, so go dump stack. */
|
||||
print_cpu_stall(gps);
|
||||
if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
|
||||
@ -705,6 +717,14 @@ static void check_cpu_stall(struct rcu_data *rdp)
|
||||
ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
|
||||
cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
|
||||
|
||||
/*
|
||||
* If a virtual machine is stopped by the host it can look to
|
||||
* the watchdog like an RCU stall. Check to see if the host
|
||||
* stopped the vm.
|
||||
*/
|
||||
if (kvm_check_and_clear_guest_paused())
|
||||
return;
|
||||
|
||||
/* They had a few time units to dump stack, so complain. */
|
||||
print_other_cpu_stall(gs2, gps);
|
||||
if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
|
||||
|
@ -237,9 +237,30 @@ static DEFINE_MUTEX(sched_core_mutex);
|
||||
static atomic_t sched_core_count;
|
||||
static struct cpumask sched_core_mask;
|
||||
|
||||
static void sched_core_lock(int cpu, unsigned long *flags)
|
||||
{
|
||||
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
|
||||
int t, i = 0;
|
||||
|
||||
local_irq_save(*flags);
|
||||
for_each_cpu(t, smt_mask)
|
||||
raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
|
||||
}
|
||||
|
||||
static void sched_core_unlock(int cpu, unsigned long *flags)
|
||||
{
|
||||
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
|
||||
int t;
|
||||
|
||||
for_each_cpu(t, smt_mask)
|
||||
raw_spin_unlock(&cpu_rq(t)->__lock);
|
||||
local_irq_restore(*flags);
|
||||
}
|
||||
|
||||
static void __sched_core_flip(bool enabled)
|
||||
{
|
||||
int cpu, t, i;
|
||||
unsigned long flags;
|
||||
int cpu, t;
|
||||
|
||||
cpus_read_lock();
|
||||
|
||||
@ -250,19 +271,12 @@ static void __sched_core_flip(bool enabled)
|
||||
for_each_cpu(cpu, &sched_core_mask) {
|
||||
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
|
||||
|
||||
i = 0;
|
||||
local_irq_disable();
|
||||
for_each_cpu(t, smt_mask) {
|
||||
/* supports up to SMT8 */
|
||||
raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
|
||||
}
|
||||
sched_core_lock(cpu, &flags);
|
||||
|
||||
for_each_cpu(t, smt_mask)
|
||||
cpu_rq(t)->core_enabled = enabled;
|
||||
|
||||
for_each_cpu(t, smt_mask)
|
||||
raw_spin_unlock(&cpu_rq(t)->__lock);
|
||||
local_irq_enable();
|
||||
sched_core_unlock(cpu, &flags);
|
||||
|
||||
cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
|
||||
}
|
||||
@ -1619,6 +1633,23 @@ static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
|
||||
uclamp_rq_dec_id(rq, p, clamp_id);
|
||||
}
|
||||
|
||||
static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
|
||||
enum uclamp_id clamp_id)
|
||||
{
|
||||
if (!p->uclamp[clamp_id].active)
|
||||
return;
|
||||
|
||||
uclamp_rq_dec_id(rq, p, clamp_id);
|
||||
uclamp_rq_inc_id(rq, p, clamp_id);
|
||||
|
||||
/*
|
||||
* Make sure to clear the idle flag if we've transiently reached 0
|
||||
* active tasks on rq.
|
||||
*/
|
||||
if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
|
||||
rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
|
||||
}
|
||||
|
||||
static inline void
|
||||
uclamp_update_active(struct task_struct *p)
|
||||
{
|
||||
@ -1642,12 +1673,8 @@ uclamp_update_active(struct task_struct *p)
|
||||
* affecting a valid clamp bucket, the next time it's enqueued,
|
||||
* it will already see the updated clamp bucket value.
|
||||
*/
|
||||
for_each_clamp_id(clamp_id) {
|
||||
if (p->uclamp[clamp_id].active) {
|
||||
uclamp_rq_dec_id(rq, p, clamp_id);
|
||||
uclamp_rq_inc_id(rq, p, clamp_id);
|
||||
}
|
||||
}
|
||||
for_each_clamp_id(clamp_id)
|
||||
uclamp_rq_reinc_id(rq, p, clamp_id);
|
||||
|
||||
task_rq_unlock(rq, p, &rf);
|
||||
}
|
||||
@ -5736,35 +5763,109 @@ void queue_core_balance(struct rq *rq)
|
||||
queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
|
||||
}
|
||||
|
||||
static inline void sched_core_cpu_starting(unsigned int cpu)
|
||||
static void sched_core_cpu_starting(unsigned int cpu)
|
||||
{
|
||||
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
|
||||
struct rq *rq, *core_rq = NULL;
|
||||
int i;
|
||||
struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
|
||||
unsigned long flags;
|
||||
int t;
|
||||
|
||||
core_rq = cpu_rq(cpu)->core;
|
||||
sched_core_lock(cpu, &flags);
|
||||
|
||||
if (!core_rq) {
|
||||
for_each_cpu(i, smt_mask) {
|
||||
rq = cpu_rq(i);
|
||||
if (rq->core && rq->core == rq)
|
||||
core_rq = rq;
|
||||
}
|
||||
WARN_ON_ONCE(rq->core != rq);
|
||||
|
||||
if (!core_rq)
|
||||
core_rq = cpu_rq(cpu);
|
||||
/* if we're the first, we'll be our own leader */
|
||||
if (cpumask_weight(smt_mask) == 1)
|
||||
goto unlock;
|
||||
|
||||
for_each_cpu(i, smt_mask) {
|
||||
rq = cpu_rq(i);
|
||||
|
||||
WARN_ON_ONCE(rq->core && rq->core != core_rq);
|
||||
rq->core = core_rq;
|
||||
/* find the leader */
|
||||
for_each_cpu(t, smt_mask) {
|
||||
if (t == cpu)
|
||||
continue;
|
||||
rq = cpu_rq(t);
|
||||
if (rq->core == rq) {
|
||||
core_rq = rq;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
|
||||
goto unlock;
|
||||
|
||||
/* install and validate core_rq */
|
||||
for_each_cpu(t, smt_mask) {
|
||||
rq = cpu_rq(t);
|
||||
|
||||
if (t == cpu)
|
||||
rq->core = core_rq;
|
||||
|
||||
WARN_ON_ONCE(rq->core != core_rq);
|
||||
}
|
||||
|
||||
unlock:
|
||||
sched_core_unlock(cpu, &flags);
|
||||
}
|
||||
|
||||
static void sched_core_cpu_deactivate(unsigned int cpu)
|
||||
{
|
||||
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
|
||||
struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
|
||||
unsigned long flags;
|
||||
int t;
|
||||
|
||||
sched_core_lock(cpu, &flags);
|
||||
|
||||
/* if we're the last man standing, nothing to do */
|
||||
if (cpumask_weight(smt_mask) == 1) {
|
||||
WARN_ON_ONCE(rq->core != rq);
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
/* if we're not the leader, nothing to do */
|
||||
if (rq->core != rq)
|
||||
goto unlock;
|
||||
|
||||
/* find a new leader */
|
||||
for_each_cpu(t, smt_mask) {
|
||||
if (t == cpu)
|
||||
continue;
|
||||
core_rq = cpu_rq(t);
|
||||
break;
|
||||
}
|
||||
|
||||
if (WARN_ON_ONCE(!core_rq)) /* impossible */
|
||||
goto unlock;
|
||||
|
||||
/* copy the shared state to the new leader */
|
||||
core_rq->core_task_seq = rq->core_task_seq;
|
||||
core_rq->core_pick_seq = rq->core_pick_seq;
|
||||
core_rq->core_cookie = rq->core_cookie;
|
||||
core_rq->core_forceidle = rq->core_forceidle;
|
||||
core_rq->core_forceidle_seq = rq->core_forceidle_seq;
|
||||
|
||||
/* install new leader */
|
||||
for_each_cpu(t, smt_mask) {
|
||||
rq = cpu_rq(t);
|
||||
rq->core = core_rq;
|
||||
}
|
||||
|
||||
unlock:
|
||||
sched_core_unlock(cpu, &flags);
|
||||
}
|
||||
|
||||
static inline void sched_core_cpu_dying(unsigned int cpu)
|
||||
{
|
||||
struct rq *rq = cpu_rq(cpu);
|
||||
|
||||
if (rq->core != rq)
|
||||
rq->core = rq;
|
||||
}
|
||||
|
||||
#else /* !CONFIG_SCHED_CORE */
|
||||
|
||||
static inline void sched_core_cpu_starting(unsigned int cpu) {}
|
||||
static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
|
||||
static inline void sched_core_cpu_dying(unsigned int cpu) {}
|
||||
|
||||
static struct task_struct *
|
||||
pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
|
||||
@ -8707,6 +8808,8 @@ int sched_cpu_deactivate(unsigned int cpu)
|
||||
*/
|
||||
if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
|
||||
static_branch_dec_cpuslocked(&sched_smt_present);
|
||||
|
||||
sched_core_cpu_deactivate(cpu);
|
||||
#endif
|
||||
|
||||
if (!sched_smp_initialized)
|
||||
@ -8811,6 +8914,7 @@ int sched_cpu_dying(unsigned int cpu)
|
||||
calc_load_migrate(rq);
|
||||
update_max_interval();
|
||||
hrtick_clear(rq);
|
||||
sched_core_cpu_dying(cpu);
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
@ -9022,7 +9126,7 @@ void __init sched_init(void)
|
||||
atomic_set(&rq->nr_iowait, 0);
|
||||
|
||||
#ifdef CONFIG_SCHED_CORE
|
||||
rq->core = NULL;
|
||||
rq->core = rq;
|
||||
rq->core_pick = NULL;
|
||||
rq->core_enabled = 0;
|
||||
rq->core_tree = RB_ROOT;
|
||||
|
@@ -1733,6 +1733,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused
*/
raw_spin_rq_lock(rq);
if (p->dl.dl_non_contending) {
update_rq_clock(rq);
sub_running_bw(&p->dl, &rq->dl);
p->dl.dl_non_contending = 0;
/*
@@ -2741,7 +2742,7 @@ void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
dl_se->dl_runtime = attr->sched_runtime;
dl_se->dl_deadline = attr->sched_deadline;
dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
dl_se->flags = attr->sched_flags;
dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
}
@@ -2754,7 +2755,8 @@ void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
attr->sched_runtime = dl_se->dl_runtime;
attr->sched_deadline = dl_se->dl_deadline;
attr->sched_period = dl_se->dl_period;
attr->sched_flags = dl_se->flags;
attr->sched_flags &= ~SCHED_DL_FLAGS;
attr->sched_flags |= dl_se->flags;
}

/*
@@ -2851,7 +2853,7 @@ bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
if (dl_se->dl_runtime != attr->sched_runtime ||
dl_se->dl_deadline != attr->sched_deadline ||
dl_se->dl_period != attr->sched_period ||
dl_se->flags != attr->sched_flags)
dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
return true;

return false;

@@ -388,6 +388,13 @@ void update_sched_domain_debugfs(void)
{
int cpu, i;

/*
* This can unfortunately be invoked before sched_debug_init() creates
* the debug directory. Don't touch sd_sysctl_cpus until then.
*/
if (!debugfs_sched)
return;

if (!cpumask_available(sd_sysctl_cpus)) {
if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
return;

@@ -1486,7 +1486,7 @@ static inline bool is_core_idle(int cpu)
if (cpu == sibling)
continue;

if (!idle_cpu(cpu))
if (!idle_cpu(sibling))
return false;
}
#endif

@@ -227,6 +227,8 @@ static inline void update_avg(u64 *avg, u64 sample)
*/
#define SCHED_FLAG_SUGOV 0x10000000

#define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)

static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
{
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
@@ -1093,7 +1095,7 @@ struct rq {
unsigned int core_sched_seq;
struct rb_root core_tree;

/* shared state */
/* shared state -- careful with sched_core_cpu_deactivate() */
unsigned int core_task_seq;
unsigned int core_pick_seq;
unsigned long core_cookie;
@@ -2255,6 +2257,9 @@ static inline struct task_struct *get_push_task(struct rq *rq)
if (p->nr_cpus_allowed == 1)
return NULL;

if (p->migration_disabled)
return NULL;

rq->push_busy = true;
return get_task_struct(p);
}

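The SCHED_DL_FLAGS mask defined in the hunk above is what the deadline.c hunks earlier in this commit use when storing and comparing sched_flags, so that only the deadline-relevant bits are kept on the scheduling entity and the remaining caller-visible bits are preserved on read-back. A minimal standalone sketch of that masking idiom follows; the two smaller flag values are assumptions made for this example only (SCHED_FLAG_SUGOV 0x10000000 appears in the diff), not a copy of the kernel's uapi definitions.

```c
#include <stdio.h>

#define SCHED_FLAG_RECLAIM     0x02u        /* assumed value, illustration only */
#define SCHED_FLAG_DL_OVERRUN  0x04u        /* assumed value, illustration only */
#define SCHED_FLAG_SUGOV       0x10000000u  /* value shown in the diff */
#define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)

int main(void)
{
	unsigned int attr_flags = 0x01u | SCHED_FLAG_DL_OVERRUN; /* caller bits + one DL bit */
	unsigned int stored;

	/* __setparam_dl style: keep only the deadline-relevant bits when storing */
	stored = attr_flags & SCHED_DL_FLAGS;

	/* __getparam_dl style: clear DL bits in the output, then overlay the stored ones */
	attr_flags &= ~SCHED_DL_FLAGS;
	attr_flags |= stored;

	printf("stored=%#x attr=%#x\n", stored, attr_flags);
	return 0;
}
```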
@ -1482,6 +1482,8 @@ int sched_max_numa_distance;
|
||||
static int *sched_domains_numa_distance;
|
||||
static struct cpumask ***sched_domains_numa_masks;
|
||||
int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
|
||||
|
||||
static unsigned long __read_mostly *sched_numa_onlined_nodes;
|
||||
#endif
|
||||
|
||||
/*
|
||||
@ -1833,6 +1835,16 @@ void sched_init_numa(void)
|
||||
sched_domains_numa_masks[i][j] = mask;
|
||||
|
||||
for_each_node(k) {
|
||||
/*
|
||||
* Distance information can be unreliable for
|
||||
* offline nodes, defer building the node
|
||||
* masks to its bringup.
|
||||
* This relies on all unique distance values
|
||||
* still being visible at init time.
|
||||
*/
|
||||
if (!node_online(j))
|
||||
continue;
|
||||
|
||||
if (sched_debug() && (node_distance(j, k) != node_distance(k, j)))
|
||||
sched_numa_warn("Node-distance not symmetric");
|
||||
|
||||
@ -1886,6 +1898,53 @@ void sched_init_numa(void)
|
||||
sched_max_numa_distance = sched_domains_numa_distance[nr_levels - 1];
|
||||
|
||||
init_numa_topology_type();
|
||||
|
||||
sched_numa_onlined_nodes = bitmap_alloc(nr_node_ids, GFP_KERNEL);
|
||||
if (!sched_numa_onlined_nodes)
|
||||
return;
|
||||
|
||||
bitmap_zero(sched_numa_onlined_nodes, nr_node_ids);
|
||||
for_each_online_node(i)
|
||||
bitmap_set(sched_numa_onlined_nodes, i, 1);
|
||||
}
|
||||
|
||||
static void __sched_domains_numa_masks_set(unsigned int node)
|
||||
{
|
||||
int i, j;
|
||||
|
||||
/*
|
||||
* NUMA masks are not built for offline nodes in sched_init_numa().
|
||||
* Thus, when a CPU of a never-onlined-before node gets plugged in,
|
||||
* adding that new CPU to the right NUMA masks is not sufficient: the
|
||||
* masks of that CPU's node must also be updated.
|
||||
*/
|
||||
if (test_bit(node, sched_numa_onlined_nodes))
|
||||
return;
|
||||
|
||||
bitmap_set(sched_numa_onlined_nodes, node, 1);
|
||||
|
||||
for (i = 0; i < sched_domains_numa_levels; i++) {
|
||||
for (j = 0; j < nr_node_ids; j++) {
|
||||
if (!node_online(j) || node == j)
|
||||
continue;
|
||||
|
||||
if (node_distance(j, node) > sched_domains_numa_distance[i])
|
||||
continue;
|
||||
|
||||
/* Add remote nodes in our masks */
|
||||
cpumask_or(sched_domains_numa_masks[i][node],
|
||||
sched_domains_numa_masks[i][node],
|
||||
sched_domains_numa_masks[0][j]);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* A new node has been brought up, potentially changing the topology
|
||||
* classification.
|
||||
*
|
||||
* Note that this is racy vs any use of sched_numa_topology_type :/
|
||||
*/
|
||||
init_numa_topology_type();
|
||||
}
|
||||
|
||||
void sched_domains_numa_masks_set(unsigned int cpu)
|
||||
@ -1893,8 +1952,14 @@ void sched_domains_numa_masks_set(unsigned int cpu)
|
||||
int node = cpu_to_node(cpu);
|
||||
int i, j;
|
||||
|
||||
__sched_domains_numa_masks_set(node);
|
||||
|
||||
for (i = 0; i < sched_domains_numa_levels; i++) {
|
||||
for (j = 0; j < nr_node_ids; j++) {
|
||||
if (!node_online(j))
|
||||
continue;
|
||||
|
||||
/* Set ourselves in the remote node's masks */
|
||||
if (node_distance(j, node) <= sched_domains_numa_distance[i])
|
||||
cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
|
||||
}
|
||||
|
@ -758,22 +758,6 @@ static void hrtimer_switch_to_hres(void)
|
||||
retrigger_next_event(NULL);
|
||||
}
|
||||
|
||||
static void clock_was_set_work(struct work_struct *work)
|
||||
{
|
||||
clock_was_set();
|
||||
}
|
||||
|
||||
static DECLARE_WORK(hrtimer_work, clock_was_set_work);
|
||||
|
||||
/*
|
||||
* Called from timekeeping and resume code to reprogram the hrtimer
|
||||
* interrupt device on all cpus.
|
||||
*/
|
||||
void clock_was_set_delayed(void)
|
||||
{
|
||||
schedule_work(&hrtimer_work);
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
static inline int hrtimer_is_hres_enabled(void) { return 0; }
|
||||
@ -891,6 +875,22 @@ void clock_was_set(void)
|
||||
timerfd_clock_was_set();
|
||||
}
|
||||
|
||||
static void clock_was_set_work(struct work_struct *work)
|
||||
{
|
||||
clock_was_set();
|
||||
}
|
||||
|
||||
static DECLARE_WORK(hrtimer_work, clock_was_set_work);
|
||||
|
||||
/*
|
||||
* Called from timekeeping and resume code to reprogram the hrtimer
|
||||
* interrupt device on all cpus and to notify timerfd.
|
||||
*/
|
||||
void clock_was_set_delayed(void)
|
||||
{
|
||||
schedule_work(&hrtimer_work);
|
||||
}
|
||||
|
||||
/*
|
||||
* During resume we might have to reprogram the high resolution timer
|
||||
* interrupt on all online CPUs. However, all other CPUs will be
|
||||
@ -1030,12 +1030,13 @@ static void __remove_hrtimer(struct hrtimer *timer,
|
||||
* remove hrtimer, called with base lock held
|
||||
*/
|
||||
static inline int
|
||||
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
|
||||
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base,
|
||||
bool restart, bool keep_local)
|
||||
{
|
||||
u8 state = timer->state;
|
||||
|
||||
if (state & HRTIMER_STATE_ENQUEUED) {
|
||||
int reprogram;
|
||||
bool reprogram;
|
||||
|
||||
/*
|
||||
* Remove the timer and force reprogramming when high
|
||||
@ -1048,8 +1049,16 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool rest
|
||||
debug_deactivate(timer);
|
||||
reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
|
||||
|
||||
/*
|
||||
* If the timer is not restarted then reprogramming is
|
||||
* required if the timer is local. If it is local and about
|
||||
* to be restarted, avoid programming it twice (on removal
|
||||
* and a moment later when it's requeued).
|
||||
*/
|
||||
if (!restart)
|
||||
state = HRTIMER_STATE_INACTIVE;
|
||||
else
|
||||
reprogram &= !keep_local;
|
||||
|
||||
__remove_hrtimer(timer, base, state, reprogram);
|
||||
return 1;
|
||||
@ -1103,9 +1112,31 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
|
||||
struct hrtimer_clock_base *base)
|
||||
{
|
||||
struct hrtimer_clock_base *new_base;
|
||||
bool force_local, first;
|
||||
|
||||
/* Remove an active timer from the queue: */
|
||||
remove_hrtimer(timer, base, true);
|
||||
/*
|
||||
* If the timer is on the local cpu base and is the first expiring
|
||||
* timer then this might end up reprogramming the hardware twice
|
||||
* (on removal and on enqueue). To avoid that by prevent the
|
||||
* reprogram on removal, keep the timer local to the current CPU
|
||||
* and enforce reprogramming after it is queued no matter whether
|
||||
* it is the new first expiring timer again or not.
|
||||
*/
|
||||
force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
|
||||
force_local &= base->cpu_base->next_timer == timer;
|
||||
|
||||
/*
|
||||
* Remove an active timer from the queue. In case it is not queued
|
||||
* on the current CPU, make sure that remove_hrtimer() updates the
|
||||
* remote data correctly.
|
||||
*
|
||||
* If it's on the current CPU and the first expiring timer, then
|
||||
* skip reprogramming, keep the timer local and enforce
|
||||
* reprogramming later if it was the first expiring timer. This
|
||||
* avoids programming the underlying clock event twice (once at
|
||||
* removal and once after enqueue).
|
||||
*/
|
||||
remove_hrtimer(timer, base, true, force_local);
|
||||
|
||||
if (mode & HRTIMER_MODE_REL)
|
||||
tim = ktime_add_safe(tim, base->get_time());
|
||||
@ -1115,9 +1146,24 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
|
||||
hrtimer_set_expires_range_ns(timer, tim, delta_ns);
|
||||
|
||||
/* Switch the timer base, if necessary: */
|
||||
new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
|
||||
if (!force_local) {
|
||||
new_base = switch_hrtimer_base(timer, base,
|
||||
mode & HRTIMER_MODE_PINNED);
|
||||
} else {
|
||||
new_base = base;
|
||||
}
|
||||
|
||||
return enqueue_hrtimer(timer, new_base, mode);
|
||||
first = enqueue_hrtimer(timer, new_base, mode);
|
||||
if (!force_local)
|
||||
return first;
|
||||
|
||||
/*
|
||||
* Timer was forced to stay on the current CPU to avoid
|
||||
* reprogramming on removal and enqueue. Force reprogram the
|
||||
* hardware by evaluating the new first expiring timer.
|
||||
*/
|
||||
hrtimer_force_reprogram(new_base->cpu_base, 1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1183,7 +1229,7 @@ int hrtimer_try_to_cancel(struct hrtimer *timer)
|
||||
base = lock_hrtimer_base(timer, &flags);
|
||||
|
||||
if (!hrtimer_callback_running(timer))
|
||||
ret = remove_hrtimer(timer, base, false);
|
||||
ret = remove_hrtimer(timer, base, false, false);
|
||||
|
||||
unlock_hrtimer_base(timer, &flags);
|
||||
|
||||
|
@@ -991,6 +991,11 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer)
if (!p)
goto out;

/* Protect timer list r/w in arm_timer() */
sighand = lock_task_sighand(p, &flags);
if (unlikely(sighand == NULL))
goto out;

/*
* Fetch the current sample and update the timer's expiry time.
*/
@@ -1001,11 +1006,6 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer)

bump_cpu_timer(timer, now);

/* Protect timer list r/w in arm_timer() */
sighand = lock_task_sighand(p, &flags);
if (unlikely(sighand == NULL))
goto out;

/*
* Now re-arm for the new expiry time.
*/
@@ -1346,8 +1346,6 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
}
}

if (!*newval)
return;
*newval += now;
}

@@ -165,3 +165,6 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);

extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem);
void timer_clear_idle(void);

void clock_was_set(void);
void clock_was_set_delayed(void);

@@ -219,6 +219,11 @@ config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
depends on DYNAMIC_FTRACE_WITH_REGS
depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

config DYNAMIC_FTRACE_WITH_ARGS
def_bool y
depends on DYNAMIC_FTRACE
depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS

config FUNCTION_PROFILER
bool "Kernel function profiler"
depends on FUNCTION_TRACER

@@ -2897,14 +2897,26 @@ int tracepoint_printk_sysctl(struct ctl_table *table, int write,

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{
enum event_trigger_type tt = ETT_NONE;
struct trace_event_file *file = fbuffer->trace_file;

if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
fbuffer->entry, &tt))
goto discard;

if (static_key_false(&tracepoint_printk_key.key))
output_printk(fbuffer);

if (static_branch_unlikely(&trace_event_exports_enabled))
ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
fbuffer->event, fbuffer->entry,
fbuffer->trace_ctx, fbuffer->regs);

trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);

discard:
if (tt)
event_triggers_post_call(file, tt);

}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);

@@ -1389,38 +1389,6 @@ event_trigger_unlock_commit(struct trace_event_file *file,
event_triggers_post_call(file, tt);
}

/**
* event_trigger_unlock_commit_regs - handle triggers and finish event commit
* @file: The file pointer associated with the event
* @buffer: The ring buffer that the event is being written to
* @event: The event meta data in the ring buffer
* @entry: The event itself
* @trace_ctx: The tracing context flags.
*
* This is a helper function to handle triggers that require data
* from the event itself. It also tests the event against filters and
* if the event is soft disabled and should be discarded.
*
* Same as event_trigger_unlock_commit() but calls
* trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
*/
static inline void
event_trigger_unlock_commit_regs(struct trace_event_file *file,
struct trace_buffer *buffer,
struct ring_buffer_event *event,
void *entry, unsigned int trace_ctx,
struct pt_regs *regs)
{
enum event_trigger_type tt = ETT_NONE;

if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
trace_buffer_unlock_commit_regs(file->tr, buffer, event,
trace_ctx, regs);

if (tt)
event_triggers_post_call(file, tt);
}

#define FILTER_PRED_INVALID ((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT (1 << 15)
#define FILTER_PRED_FOLD (1 << 15)

@@ -3430,6 +3430,8 @@ trace_action_create_field_var(struct hist_trigger_data *hist_data,
event = data->match_data.event;
}

if (!event)
goto free;
/*
* At this point, we're looking at a field on another
* event. Because we can't modify a hist trigger on

@ -253,10 +253,40 @@ static struct osnoise_data {
|
||||
*/
|
||||
static bool osnoise_busy;
|
||||
|
||||
#ifdef CONFIG_PREEMPT_RT
|
||||
/*
|
||||
* Print the osnoise header info.
|
||||
*/
|
||||
static void print_osnoise_headers(struct seq_file *s)
|
||||
{
|
||||
if (osnoise_data.tainted)
|
||||
seq_puts(s, "# osnoise is tainted!\n");
|
||||
|
||||
seq_puts(s, "# _-------=> irqs-off\n");
|
||||
seq_puts(s, "# / _------=> need-resched\n");
|
||||
seq_puts(s, "# | / _-----=> need-resched-lazy\n");
|
||||
seq_puts(s, "# || / _----=> hardirq/softirq\n");
|
||||
seq_puts(s, "# ||| / _---=> preempt-depth\n");
|
||||
seq_puts(s, "# |||| / _--=> preempt-lazy-depth\n");
|
||||
seq_puts(s, "# ||||| / _-=> migrate-disable\n");
|
||||
|
||||
seq_puts(s, "# |||||| / ");
|
||||
seq_puts(s, " MAX\n");
|
||||
|
||||
seq_puts(s, "# ||||| / ");
|
||||
seq_puts(s, " SINGLE Interference counters:\n");
|
||||
|
||||
seq_puts(s, "# ||||||| RUNTIME ");
|
||||
seq_puts(s, " NOISE %% OF CPU NOISE +-----------------------------+\n");
|
||||
|
||||
seq_puts(s, "# TASK-PID CPU# ||||||| TIMESTAMP IN US ");
|
||||
seq_puts(s, " IN US AVAILABLE IN US HW NMI IRQ SIRQ THREAD\n");
|
||||
|
||||
seq_puts(s, "# | | | ||||||| | | ");
|
||||
seq_puts(s, " | | | | | | | |\n");
|
||||
}
|
||||
#else /* CONFIG_PREEMPT_RT */
|
||||
static void print_osnoise_headers(struct seq_file *s)
|
||||
{
|
||||
if (osnoise_data.tainted)
|
||||
seq_puts(s, "# osnoise is tainted!\n");
|
||||
@ -279,6 +309,7 @@ static void print_osnoise_headers(struct seq_file *s)
|
||||
seq_puts(s, "# | | | |||| | | ");
|
||||
seq_puts(s, " | | | | | | | |\n");
|
||||
}
|
||||
#endif /* CONFIG_PREEMPT_RT */
|
||||
|
||||
/*
|
||||
* osnoise_taint - report an osnoise error.
|
||||
@ -323,6 +354,24 @@ static void trace_osnoise_sample(struct osnoise_sample *sample)
|
||||
/*
|
||||
* Print the timerlat header info.
|
||||
*/
|
||||
#ifdef CONFIG_PREEMPT_RT
|
||||
static void print_timerlat_headers(struct seq_file *s)
|
||||
{
|
||||
seq_puts(s, "# _-------=> irqs-off\n");
|
||||
seq_puts(s, "# / _------=> need-resched\n");
|
||||
seq_puts(s, "# | / _-----=> need-resched-lazy\n");
|
||||
seq_puts(s, "# || / _----=> hardirq/softirq\n");
|
||||
seq_puts(s, "# ||| / _---=> preempt-depth\n");
|
||||
seq_puts(s, "# |||| / _--=> preempt-lazy-depth\n");
|
||||
seq_puts(s, "# ||||| / _-=> migrate-disable\n");
|
||||
seq_puts(s, "# |||||| /\n");
|
||||
seq_puts(s, "# ||||||| ACTIVATION\n");
|
||||
seq_puts(s, "# TASK-PID CPU# ||||||| TIMESTAMP ID ");
|
||||
seq_puts(s, " CONTEXT LATENCY\n");
|
||||
seq_puts(s, "# | | | ||||||| | | ");
|
||||
seq_puts(s, " | |\n");
|
||||
}
|
||||
#else /* CONFIG_PREEMPT_RT */
|
||||
static void print_timerlat_headers(struct seq_file *s)
|
||||
{
|
||||
seq_puts(s, "# _-----=> irqs-off\n");
|
||||
@ -336,6 +385,7 @@ static void print_timerlat_headers(struct seq_file *s)
|
||||
seq_puts(s, "# | | | |||| | | ");
|
||||
seq_puts(s, " | |\n");
|
||||
}
|
||||
#endif /* CONFIG_PREEMPT_RT */
|
||||
|
||||
/*
|
||||
* Record an timerlat_sample into the tracer buffer.
|
||||
@ -1025,9 +1075,13 @@ diff_osn_sample_stats(struct osnoise_variables *osn_var, struct osnoise_sample *
|
||||
/*
|
||||
* osnoise_stop_tracing - Stop tracing and the tracer.
|
||||
*/
|
||||
static void osnoise_stop_tracing(void)
|
||||
static __always_inline void osnoise_stop_tracing(void)
|
||||
{
|
||||
struct trace_array *tr = osnoise_trace;
|
||||
|
||||
trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_,
|
||||
"stop tracing hit on cpu %d\n", smp_processor_id());
|
||||
|
||||
tracer_tracing_off(tr);
|
||||
}
|
||||
|
||||
|
@@ -80,7 +80,7 @@ EXPORT_SYMBOL_GPL(mpi_const);
/****************
* Note: It was a bad idea to use the number of limbs to allocate
* because on a alpha the limbs are large but we normally need
* integers of n bits - So we should chnage this to bits (or bytes).
* integers of n bits - So we should change this to bits (or bytes).
*
* But mpi_alloc is used in a lot of places :-)
*/
@@ -148,7 +148,7 @@ int mpi_resize(MPI a, unsigned nlimbs)
return 0; /* no need to do it */

if (a->d) {
p = kmalloc_array(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
p = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
if (!p)
return -ENOMEM;
memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));

|
||||
{
|
||||
u32 n_bits = hweight32(prandom_u32_state(&rnd_state)) % (max_bits + 1);
|
||||
|
||||
return prandom_u32_state(&rnd_state) & (UINT_MAX >> (32 - n_bits));
|
||||
return prandom_u32_state(&rnd_state) & GENMASK(n_bits, 0);
|
||||
}
|
||||
|
||||
static unsigned long long __init next_test_random_ull(void)
|
||||
@ -280,7 +280,7 @@ static unsigned long long __init next_test_random_ull(void)
|
||||
u32 n_bits = (hweight32(rand1) * 3) % 64;
|
||||
u64 val = (u64)prandom_u32_state(&rnd_state) * rand1;
|
||||
|
||||
return val & (ULLONG_MAX >> (64 - n_bits));
|
||||
return val & GENMASK_ULL(n_bits, 0);
|
||||
}
|
||||
|
||||
#define random_for_type(T) \
|
||||
|
mm/hugetlb.c
@ -2476,7 +2476,7 @@ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
|
||||
if (!rc) {
|
||||
/*
|
||||
* This indicates there is an entry in the reserve map
|
||||
* added by alloc_huge_page. We know it was added
|
||||
* not added by alloc_huge_page. We know it was added
|
||||
* before the alloc_huge_page call, otherwise
|
||||
* HPageRestoreReserve would be set on the page.
|
||||
* Remove the entry so that a subsequent allocation
|
||||
@ -4660,7 +4660,9 @@ static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
|
||||
spin_unlock(ptl);
|
||||
mmu_notifier_invalidate_range_end(&range);
|
||||
out_release_all:
|
||||
restore_reserve_on_error(h, vma, haddr, new_page);
|
||||
/* No restore in case of successful pagetable update (Break COW) */
|
||||
if (new_page != old_page)
|
||||
restore_reserve_on_error(h, vma, haddr, new_page);
|
||||
put_page(new_page);
|
||||
out_release_old:
|
||||
put_page(old_page);
|
||||
@ -4776,7 +4778,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
|
||||
pte_t new_pte;
|
||||
spinlock_t *ptl;
|
||||
unsigned long haddr = address & huge_page_mask(h);
|
||||
bool new_page = false;
|
||||
bool new_page, new_pagecache_page = false;
|
||||
|
||||
/*
|
||||
* Currently, we are forced to kill the process in the event the
|
||||
@ -4799,6 +4801,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
|
||||
goto out;
|
||||
|
||||
retry:
|
||||
new_page = false;
|
||||
page = find_lock_page(mapping, idx);
|
||||
if (!page) {
|
||||
/* Check for page in userfault range */
|
||||
@ -4842,6 +4845,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
|
||||
goto retry;
|
||||
goto out;
|
||||
}
|
||||
new_pagecache_page = true;
|
||||
} else {
|
||||
lock_page(page);
|
||||
if (unlikely(anon_vma_prepare(vma))) {
|
||||
@ -4926,7 +4930,9 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
|
||||
spin_unlock(ptl);
|
||||
backout_unlocked:
|
||||
unlock_page(page);
|
||||
restore_reserve_on_error(h, vma, haddr, page);
|
||||
/* restore reserve for newly allocated pages not in page cache */
|
||||
if (new_page && !new_pagecache_page)
|
||||
restore_reserve_on_error(h, vma, haddr, page);
|
||||
put_page(page);
|
||||
goto out;
|
||||
}
|
||||
@ -5135,6 +5141,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
|
||||
int ret = -ENOMEM;
|
||||
struct page *page;
|
||||
int writable;
|
||||
bool new_pagecache_page = false;
|
||||
|
||||
if (is_continue) {
|
||||
ret = -EFAULT;
|
||||
@ -5228,6 +5235,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
|
||||
ret = huge_add_to_page_cache(page, mapping, idx);
|
||||
if (ret)
|
||||
goto out_release_nounlock;
|
||||
new_pagecache_page = true;
|
||||
}
|
||||
|
||||
ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
|
||||
@ -5291,7 +5299,8 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
|
||||
if (vm_shared || is_continue)
|
||||
unlock_page(page);
|
||||
out_release_nounlock:
|
||||
restore_reserve_on_error(h, dst_vma, dst_addr, page);
|
||||
if (!new_pagecache_page)
|
||||
restore_reserve_on_error(h, dst_vma, dst_addr, page);
|
||||
put_page(page);
|
||||
goto out;
|
||||
}
|
||||
|
@ -1146,7 +1146,7 @@ static int __get_hwpoison_page(struct page *page)
|
||||
* unexpected races caused by taking a page refcount.
|
||||
*/
|
||||
if (!HWPoisonHandlable(head))
|
||||
return 0;
|
||||
return -EBUSY;
|
||||
|
||||
if (PageTransHuge(head)) {
|
||||
/*
|
||||
@ -1199,9 +1199,15 @@ static int get_any_page(struct page *p, unsigned long flags)
|
||||
}
|
||||
goto out;
|
||||
} else if (ret == -EBUSY) {
|
||||
/* We raced with freeing huge page to buddy, retry. */
|
||||
if (pass++ < 3)
|
||||
/*
|
||||
* We raced with (possibly temporary) unhandlable
|
||||
* page, retry.
|
||||
*/
|
||||
if (pass++ < 3) {
|
||||
shake_page(p, 1);
|
||||
goto try_again;
|
||||
}
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
@ -1731,6 +1731,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
|
||||
undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
|
||||
memory_notify(MEM_CANCEL_OFFLINE, &arg);
|
||||
failed_removal_pcplists_disabled:
|
||||
lru_cache_enable();
|
||||
zone_pcp_enable(zone);
|
||||
failed_removal:
|
||||
pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
|
||||
|
@ -3453,19 +3453,10 @@ void free_unref_page_list(struct list_head *list)
|
||||
* comment in free_unref_page.
|
||||
*/
|
||||
migratetype = get_pcppage_migratetype(page);
|
||||
if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
|
||||
if (unlikely(is_migrate_isolate(migratetype))) {
|
||||
list_del(&page->lru);
|
||||
free_one_page(page_zone(page), page, pfn, 0,
|
||||
migratetype, FPI_NONE);
|
||||
continue;
|
||||
}
|
||||
|
||||
/*
|
||||
* Non-isolated types over MIGRATE_PCPTYPES get added
|
||||
* to the MIGRATE_MOVABLE pcp list.
|
||||
*/
|
||||
set_pcppage_migratetype(page, MIGRATE_MOVABLE);
|
||||
if (unlikely(is_migrate_isolate(migratetype))) {
|
||||
list_del(&page->lru);
|
||||
free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
|
||||
continue;
|
||||
}
|
||||
|
||||
set_page_private(page, pfn);
|
||||
@ -3475,7 +3466,15 @@ void free_unref_page_list(struct list_head *list)
|
||||
list_for_each_entry_safe(page, next, list, lru) {
|
||||
pfn = page_private(page);
|
||||
set_page_private(page, 0);
|
||||
|
||||
/*
|
||||
* Non-isolated types over MIGRATE_PCPTYPES get added
|
||||
* to the MIGRATE_MOVABLE pcp list.
|
||||
*/
|
||||
migratetype = get_pcppage_migratetype(page);
|
||||
if (unlikely(migratetype >= MIGRATE_PCPTYPES))
|
||||
migratetype = MIGRATE_MOVABLE;
|
||||
|
||||
trace_mm_page_free_batched(page);
|
||||
free_unref_page_commit(page, pfn, migratetype, 0);
|
||||
|
||||
|
mm/shmem.c
@ -1696,8 +1696,7 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
|
||||
struct address_space *mapping = inode->i_mapping;
|
||||
struct shmem_inode_info *info = SHMEM_I(inode);
|
||||
struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
|
||||
struct swap_info_struct *si;
|
||||
struct page *page = NULL;
|
||||
struct page *page;
|
||||
swp_entry_t swap;
|
||||
int error;
|
||||
|
||||
@ -1705,12 +1704,6 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
|
||||
swap = radix_to_swp_entry(*pagep);
|
||||
*pagep = NULL;
|
||||
|
||||
/* Prevent swapoff from happening to us. */
|
||||
si = get_swap_device(swap);
|
||||
if (!si) {
|
||||
error = EINVAL;
|
||||
goto failed;
|
||||
}
|
||||
/* Look it up and read it in.. */
|
||||
page = lookup_swap_cache(swap, NULL, 0);
|
||||
if (!page) {
|
||||
@ -1772,8 +1765,6 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
|
||||
swap_free(swap);
|
||||
|
||||
*pagep = page;
|
||||
if (si)
|
||||
put_swap_device(si);
|
||||
return 0;
|
||||
failed:
|
||||
if (!shmem_confirm_swap(mapping, index, swap))
|
||||
@ -1784,9 +1775,6 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
|
||||
put_page(page);
|
||||
}
|
||||
|
||||
if (si)
|
||||
put_swap_device(si);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
|
@ -628,13 +628,6 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
|
||||
if (!mask)
|
||||
goto skip;
|
||||
|
||||
/* Test swap type to make sure the dereference is safe */
|
||||
if (likely(si->flags & (SWP_BLKDEV | SWP_FS_OPS))) {
|
||||
struct inode *inode = si->swap_file->f_mapping->host;
|
||||
if (inode_read_congested(inode))
|
||||
goto skip;
|
||||
}
|
||||
|
||||
do_poll = false;
|
||||
/* Read a page_cluster sized and aligned cluster around offset. */
|
||||
start_offset = offset & ~mask;
|
||||
|
mm/vmscan.c
@ -100,9 +100,12 @@ struct scan_control {
|
||||
unsigned int may_swap:1;
|
||||
|
||||
/*
|
||||
* Cgroups are not reclaimed below their configured memory.low,
|
||||
* unless we threaten to OOM. If any cgroups are skipped due to
|
||||
* memory.low and nothing was reclaimed, go back for memory.low.
|
||||
* Cgroup memory below memory.low is protected as long as we
|
||||
* don't threaten to OOM. If any cgroup is reclaimed at
|
||||
* reduced force or passed over entirely due to its memory.low
|
||||
* setting (memcg_low_skipped), and nothing is reclaimed as a
|
||||
* result, then go back for one more cycle that reclaims the protected
|
||||
* memory (memcg_low_reclaim) to avert OOM.
|
||||
*/
|
||||
unsigned int memcg_low_reclaim:1;
|
||||
unsigned int memcg_low_skipped:1;
|
||||
@ -2537,15 +2540,14 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
|
||||
for_each_evictable_lru(lru) {
|
||||
int file = is_file_lru(lru);
|
||||
unsigned long lruvec_size;
|
||||
unsigned long low, min;
|
||||
unsigned long scan;
|
||||
unsigned long protection;
|
||||
|
||||
lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
|
||||
protection = mem_cgroup_protection(sc->target_mem_cgroup,
|
||||
memcg,
|
||||
sc->memcg_low_reclaim);
|
||||
mem_cgroup_protection(sc->target_mem_cgroup, memcg,
|
||||
&min, &low);
|
||||
|
||||
if (protection) {
|
||||
if (min || low) {
|
||||
/*
|
||||
* Scale a cgroup's reclaim pressure by proportioning
|
||||
* its current usage to its memory.low or memory.min
|
||||
@ -2576,6 +2578,15 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
|
||||
* hard protection.
|
||||
*/
|
||||
unsigned long cgroup_size = mem_cgroup_size(memcg);
|
||||
unsigned long protection;
|
||||
|
||||
/* memory.low scaling, make sure we retry before OOM */
|
||||
if (!sc->memcg_low_reclaim && low > min) {
|
||||
protection = low;
|
||||
sc->memcg_low_skipped = 1;
|
||||
} else {
|
||||
protection = min;
|
||||
}
|
||||
|
||||
/* Avoid TOCTOU with earlier protection check */
|
||||
cgroup_size = max(cgroup_size, protection);
|
||||
@ -4413,11 +4424,13 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
|
||||
.may_swap = 1,
|
||||
.reclaim_idx = gfp_zone(gfp_mask),
|
||||
};
|
||||
unsigned long pflags;
|
||||
|
||||
trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
|
||||
sc.gfp_mask);
|
||||
|
||||
cond_resched();
|
||||
psi_memstall_enter(&pflags);
|
||||
fs_reclaim_acquire(sc.gfp_mask);
|
||||
/*
|
||||
* We need to be able to allocate from the reserves for RECLAIM_UNMAP
|
||||
@ -4442,6 +4455,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
|
||||
current->flags &= ~PF_SWAPWRITE;
|
||||
memalloc_noreclaim_restore(noreclaim_flag);
|
||||
fs_reclaim_release(sc.gfp_mask);
|
||||
psi_memstall_leave(&pflags);
|
||||
|
||||
trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);
|
||||
|
||||
|
@ -170,7 +170,8 @@ static void lowpan_dev_debugfs_ctx_init(struct net_device *dev,
struct dentry *root;
char buf[32];

WARN_ON_ONCE(id > LOWPAN_IPHC_CTX_TABLE_SIZE);
if (WARN_ON_ONCE(id >= LOWPAN_IPHC_CTX_TABLE_SIZE))
return;

sprintf(buf, "%d", id);

@ -1343,6 +1343,12 @@ int hci_inquiry(void __user *arg)
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* Restrict maximum inquiry length to 60 seconds */
|
||||
if (ir.length > 60) {
|
||||
err = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
|
||||
hci_dev_lock(hdev);
|
||||
if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
|
||||
inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
|
||||
@ -1727,6 +1733,14 @@ int hci_dev_do_close(struct hci_dev *hdev)
|
||||
hci_request_cancel_all(hdev);
|
||||
hci_req_sync_lock(hdev);
|
||||
|
||||
if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
|
||||
!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
|
||||
test_bit(HCI_UP, &hdev->flags)) {
|
||||
/* Execute vendor specific shutdown routine */
|
||||
if (hdev->shutdown)
|
||||
hdev->shutdown(hdev);
|
||||
}
|
||||
|
||||
if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
|
||||
cancel_delayed_work_sync(&hdev->cmd_timer);
|
||||
hci_req_sync_unlock(hdev);
|
||||
@ -1798,14 +1812,6 @@ int hci_dev_do_close(struct hci_dev *hdev)
|
||||
clear_bit(HCI_INIT, &hdev->flags);
|
||||
}
|
||||
|
||||
if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
|
||||
!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
|
||||
test_bit(HCI_UP, &hdev->flags)) {
|
||||
/* Execute vendor specific shutdown routine */
|
||||
if (hdev->shutdown)
|
||||
hdev->shutdown(hdev);
|
||||
}
|
||||
|
||||
/* flush cmd work */
|
||||
flush_work(&hdev->cmd_work);
|
||||
|
||||
|
@ -85,7 +85,6 @@ static void sco_sock_timeout(struct timer_list *t)
|
||||
sk->sk_state_change(sk);
|
||||
bh_unlock_sock(sk);
|
||||
|
||||
sco_sock_kill(sk);
|
||||
sock_put(sk);
|
||||
}
|
||||
|
||||
@ -177,7 +176,6 @@ static void sco_conn_del(struct hci_conn *hcon, int err)
|
||||
sco_sock_clear_timer(sk);
|
||||
sco_chan_del(sk, err);
|
||||
bh_unlock_sock(sk);
|
||||
sco_sock_kill(sk);
|
||||
sock_put(sk);
|
||||
}
|
||||
|
||||
@ -310,7 +308,7 @@ static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
|
||||
if (!sk)
|
||||
goto drop;
|
||||
|
||||
BT_DBG("sk %p len %d", sk, skb->len);
|
||||
BT_DBG("sk %p len %u", sk, skb->len);
|
||||
|
||||
if (sk->sk_state != BT_CONNECTED)
|
||||
goto drop;
|
||||
@ -394,8 +392,7 @@ static void sco_sock_cleanup_listen(struct sock *parent)
|
||||
*/
|
||||
static void sco_sock_kill(struct sock *sk)
|
||||
{
|
||||
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket ||
|
||||
sock_flag(sk, SOCK_DEAD))
|
||||
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
|
||||
return;
|
||||
|
||||
BT_DBG("sk %p state %d", sk, sk->sk_state);
|
||||
@ -447,7 +444,6 @@ static void sco_sock_close(struct sock *sk)
|
||||
lock_sock(sk);
|
||||
__sco_sock_close(sk);
|
||||
release_sock(sk);
|
||||
sco_sock_kill(sk);
|
||||
}
|
||||
|
||||
static void sco_skb_put_cmsg(struct sk_buff *skb, struct msghdr *msg,
|
||||
@ -773,6 +769,11 @@ static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting)
|
||||
cp.max_latency = cpu_to_le16(0xffff);
|
||||
cp.retrans_effort = 0xff;
|
||||
break;
|
||||
default:
|
||||
/* use CVSD settings as fallback */
|
||||
cp.max_latency = cpu_to_le16(0xffff);
|
||||
cp.retrans_effort = 0xff;
|
||||
break;
|
||||
}
|
||||
|
||||
hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
|
||||
@ -905,7 +906,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
|
||||
|
||||
opts.mtu = sco_pi(sk)->conn->mtu;
|
||||
|
||||
BT_DBG("mtu %d", opts.mtu);
|
||||
BT_DBG("mtu %u", opts.mtu);
|
||||
|
||||
len = min_t(unsigned int, len, sizeof(opts));
|
||||
if (copy_to_user(optval, (char *)&opts, len))
|
||||
@ -1167,7 +1168,7 @@ static void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
|
||||
if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
|
||||
return;
|
||||
|
||||
BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
|
||||
BT_DBG("hcon %p bdaddr %pMR status %u", hcon, &hcon->dst, status);
|
||||
|
||||
if (!status) {
|
||||
struct sco_conn *conn;
|
||||
@ -1196,7 +1197,7 @@ void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
|
||||
if (!conn)
|
||||
goto drop;
|
||||
|
||||
BT_DBG("conn %p len %d", conn, skb->len);
|
||||
BT_DBG("conn %p len %u", conn, skb->len);
|
||||
|
||||
if (skb->len) {
|
||||
sco_recv_frame(conn, skb);
|
||||
|
@ -3801,10 +3801,12 @@ static void devlink_param_notify(struct devlink *devlink,
|
||||
struct devlink_param_item *param_item,
|
||||
enum devlink_command cmd);
|
||||
|
||||
static void devlink_reload_netns_change(struct devlink *devlink,
|
||||
struct net *dest_net)
|
||||
static void devlink_ns_change_notify(struct devlink *devlink,
|
||||
struct net *dest_net, struct net *curr_net,
|
||||
bool new)
|
||||
{
|
||||
struct devlink_param_item *param_item;
|
||||
enum devlink_command cmd;
|
||||
|
||||
/* Userspace needs to be notified about devlink objects
|
||||
* removed from original and entering new network namespace.
|
||||
@ -3812,17 +3814,18 @@ static void devlink_reload_netns_change(struct devlink *devlink,
|
||||
* reload process so the notifications are generated separatelly.
|
||||
*/
|
||||
|
||||
list_for_each_entry(param_item, &devlink->param_list, list)
|
||||
devlink_param_notify(devlink, 0, param_item,
|
||||
DEVLINK_CMD_PARAM_DEL);
|
||||
devlink_notify(devlink, DEVLINK_CMD_DEL);
|
||||
if (!dest_net || net_eq(dest_net, curr_net))
|
||||
return;
|
||||
|
||||
__devlink_net_set(devlink, dest_net);
|
||||
if (new)
|
||||
devlink_notify(devlink, DEVLINK_CMD_NEW);
|
||||
|
||||
devlink_notify(devlink, DEVLINK_CMD_NEW);
|
||||
cmd = new ? DEVLINK_CMD_PARAM_NEW : DEVLINK_CMD_PARAM_DEL;
|
||||
list_for_each_entry(param_item, &devlink->param_list, list)
|
||||
devlink_param_notify(devlink, 0, param_item,
|
||||
DEVLINK_CMD_PARAM_NEW);
|
||||
devlink_param_notify(devlink, 0, param_item, cmd);
|
||||
|
||||
if (!new)
|
||||
devlink_notify(devlink, DEVLINK_CMD_DEL);
|
||||
}
|
||||
|
||||
static bool devlink_reload_supported(const struct devlink_ops *ops)
|
||||
@ -3902,6 +3905,7 @@ static int devlink_reload(struct devlink *devlink, struct net *dest_net,
|
||||
u32 *actions_performed, struct netlink_ext_ack *extack)
|
||||
{
|
||||
u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
|
||||
struct net *curr_net;
|
||||
int err;
|
||||
|
||||
if (!devlink->reload_enabled)
|
||||
@ -3909,18 +3913,22 @@ static int devlink_reload(struct devlink *devlink, struct net *dest_net,
|
||||
|
||||
memcpy(remote_reload_stats, devlink->stats.remote_reload_stats,
|
||||
sizeof(remote_reload_stats));
|
||||
|
||||
curr_net = devlink_net(devlink);
|
||||
devlink_ns_change_notify(devlink, dest_net, curr_net, false);
|
||||
err = devlink->ops->reload_down(devlink, !!dest_net, action, limit, extack);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (dest_net && !net_eq(dest_net, devlink_net(devlink)))
|
||||
devlink_reload_netns_change(devlink, dest_net);
|
||||
if (dest_net && !net_eq(dest_net, curr_net))
|
||||
__devlink_net_set(devlink, dest_net);
|
||||
|
||||
err = devlink->ops->reload_up(devlink, action, limit, actions_performed, extack);
|
||||
devlink_reload_failed_set(devlink, !!err);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
devlink_ns_change_notify(devlink, dest_net, curr_net, true);
|
||||
WARN_ON(!(*actions_performed & BIT(action)));
|
||||
/* Catch driver on updating the remote action within devlink reload */
|
||||
WARN_ON(memcmp(remote_reload_stats, devlink->stats.remote_reload_stats,
|
||||
@ -4117,7 +4125,7 @@ static void __devlink_flash_update_notify(struct devlink *devlink,
|
||||
|
||||
static void devlink_flash_update_begin_notify(struct devlink *devlink)
|
||||
{
|
||||
struct devlink_flash_notify params = { 0 };
|
||||
struct devlink_flash_notify params = {};
|
||||
|
||||
__devlink_flash_update_notify(devlink,
|
||||
DEVLINK_CMD_FLASH_UPDATE,
|
||||
@ -4126,7 +4134,7 @@ static void devlink_flash_update_begin_notify(struct devlink *devlink)
|
||||
|
||||
static void devlink_flash_update_end_notify(struct devlink *devlink)
|
||||
{
|
||||
struct devlink_flash_notify params = { 0 };
|
||||
struct devlink_flash_notify params = {};
|
||||
|
||||
__devlink_flash_update_notify(devlink,
|
||||
DEVLINK_CMD_FLASH_UPDATE_END,
|
||||
|
@ -2608,6 +2608,7 @@ static int do_setlink(const struct sk_buff *skb,
|
||||
return err;
|
||||
|
||||
if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
|
||||
const char *pat = ifname && ifname[0] ? ifname : NULL;
|
||||
struct net *net;
|
||||
int new_ifindex;
|
||||
|
||||
@ -2623,7 +2624,7 @@ static int do_setlink(const struct sk_buff *skb,
|
||||
else
|
||||
new_ifindex = 0;
|
||||
|
||||
err = __dev_change_net_namespace(dev, net, ifname, new_ifindex);
|
||||
err = __dev_change_net_namespace(dev, net, pat, new_ifindex);
|
||||
put_net(net);
|
||||
if (err)
|
||||
goto errout;
|
||||
|
@ -18,16 +18,6 @@ if NET_DSA
|
||||
|
||||
# Drivers must select the appropriate tagging format(s)
|
||||
|
||||
config NET_DSA_TAG_8021Q
|
||||
tristate
|
||||
select VLAN_8021Q
|
||||
help
|
||||
Unlike the other tagging protocols, the 802.1Q config option simply
|
||||
provides helpers for other tagging implementations that might rely on
|
||||
VLAN in one way or another. It is not a complete solution.
|
||||
|
||||
Drivers which use these helpers should select this as dependency.
|
||||
|
||||
config NET_DSA_TAG_AR9331
|
||||
tristate "Tag driver for Atheros AR9331 SoC with built-in switch"
|
||||
help
|
||||
@ -126,7 +116,6 @@ config NET_DSA_TAG_OCELOT_8021Q
|
||||
tristate "Tag driver for Ocelot family of switches, using VLAN"
|
||||
depends on MSCC_OCELOT_SWITCH_LIB || \
|
||||
(MSCC_OCELOT_SWITCH_LIB=n && COMPILE_TEST)
|
||||
select NET_DSA_TAG_8021Q
|
||||
help
|
||||
Say Y or M if you want to enable support for tagging frames with a
|
||||
custom VLAN-based header. Frames that require timestamping, such as
|
||||
@ -149,7 +138,7 @@ config NET_DSA_TAG_LAN9303
|
||||
|
||||
config NET_DSA_TAG_SJA1105
|
||||
tristate "Tag driver for NXP SJA1105 switches"
|
||||
select NET_DSA_TAG_8021Q
|
||||
depends on (NET_DSA_SJA1105 && NET_DSA_SJA1105_PTP) || !NET_DSA_SJA1105 || !NET_DSA_SJA1105_PTP
|
||||
select PACKING
|
||||
help
|
||||
Say Y or M if you want to enable support for tagging frames with the
|
||||
|
@ -1,10 +1,9 @@
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
# the core
|
||||
obj-$(CONFIG_NET_DSA) += dsa_core.o
|
||||
dsa_core-y += dsa.o dsa2.o master.o port.o slave.o switch.o
|
||||
dsa_core-y += dsa.o dsa2.o master.o port.o slave.o switch.o tag_8021q.o
|
||||
|
||||
# tagging formats
|
||||
obj-$(CONFIG_NET_DSA_TAG_8021Q) += tag_8021q.o
|
||||
obj-$(CONFIG_NET_DSA_TAG_AR9331) += tag_ar9331.o
|
||||
obj-$(CONFIG_NET_DSA_TAG_BRCM_COMMON) += tag_brcm.o
|
||||
obj-$(CONFIG_NET_DSA_TAG_DSA_COMMON) += tag_dsa.o
|
||||
|
@ -234,8 +234,6 @@ int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
|
||||
int dsa_port_bridge_flags(const struct dsa_port *dp,
|
||||
struct switchdev_brport_flags flags,
|
||||
struct netlink_ext_ack *extack);
|
||||
int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
|
||||
struct netlink_ext_ack *extack);
|
||||
int dsa_port_vlan_add(struct dsa_port *dp,
|
||||
const struct switchdev_obj_port_vlan *vlan,
|
||||
struct netlink_ext_ack *extack);
|
||||
|
@ -186,10 +186,6 @@ static int dsa_port_switchdev_sync(struct dsa_port *dp,
|
||||
if (err && err != -EOPNOTSUPP)
|
||||
return err;
|
||||
|
||||
err = dsa_port_mrouter(dp->cpu_dp, br_multicast_router(br), extack);
|
||||
if (err && err != -EOPNOTSUPP)
|
||||
return err;
|
||||
|
||||
err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
|
||||
if (err && err != -EOPNOTSUPP)
|
||||
return err;
|
||||
@ -272,12 +268,6 @@ static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
|
||||
|
||||
/* VLAN filtering is handled by dsa_switch_bridge_leave */
|
||||
|
||||
/* Some drivers treat the notification for having a local multicast
|
||||
* router by allowing multicast to be flooded to the CPU, so we should
|
||||
* allow this in standalone mode too.
|
||||
*/
|
||||
dsa_port_mrouter(dp->cpu_dp, true, NULL);
|
||||
|
||||
/* Ageing time may be global to the switch chip, so don't change it
|
||||
* here because we have no good reason (or value) to change it to.
|
||||
*/
|
||||
@ -607,17 +597,6 @@ int dsa_port_bridge_flags(const struct dsa_port *dp,
|
||||
return ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
|
||||
}
|
||||
|
||||
int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct dsa_switch *ds = dp->ds;
|
||||
|
||||
if (!ds->ops->port_set_mrouter)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
return ds->ops->port_set_mrouter(ds, dp->index, mrouter, extack);
|
||||
}
|
||||
|
||||
int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
|
||||
bool targeted_match)
|
||||
{
|
||||
|
@ -314,12 +314,6 @@ static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
|
||||
|
||||
ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
|
||||
break;
|
||||
case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
|
||||
if (!dsa_port_offloads_bridge(dp, attr->orig_dev))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter, extack);
|
||||
break;
|
||||
default:
|
||||
ret = -EOPNOTSUPP;
|
||||
break;
|
||||
|
@ -493,5 +493,3 @@ void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
|
||||
skb->priority = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dsa_8021q_rcv);
|
||||
|
||||
MODULE_LICENSE("GPL v2");
|
||||
|
@ -465,14 +465,16 @@ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
|
||||
if (!doi_def)
|
||||
return;
|
||||
|
||||
switch (doi_def->type) {
|
||||
case CIPSO_V4_MAP_TRANS:
|
||||
kfree(doi_def->map.std->lvl.cipso);
|
||||
kfree(doi_def->map.std->lvl.local);
|
||||
kfree(doi_def->map.std->cat.cipso);
|
||||
kfree(doi_def->map.std->cat.local);
|
||||
kfree(doi_def->map.std);
|
||||
break;
|
||||
if (doi_def->map.std) {
|
||||
switch (doi_def->type) {
|
||||
case CIPSO_V4_MAP_TRANS:
|
||||
kfree(doi_def->map.std->lvl.cipso);
|
||||
kfree(doi_def->map.std->lvl.local);
|
||||
kfree(doi_def->map.std->cat.cipso);
|
||||
kfree(doi_def->map.std->cat.local);
|
||||
kfree(doi_def->map.std);
|
||||
break;
|
||||
}
|
||||
}
|
||||
kfree(doi_def);
|
||||
}
|
||||
|
@ -2720,6 +2720,7 @@ int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u
|
||||
rv = 1;
|
||||
} else if (im) {
|
||||
if (src_addr) {
|
||||
spin_lock_bh(&im->lock);
|
||||
for (psf = im->sources; psf; psf = psf->sf_next) {
|
||||
if (psf->sf_inaddr == src_addr)
|
||||
break;
|
||||
@ -2730,6 +2731,7 @@ int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u
|
||||
im->sfcount[MCAST_EXCLUDE];
|
||||
else
|
||||
rv = im->sfcount[MCAST_EXCLUDE] != 0;
|
||||
spin_unlock_bh(&im->lock);
|
||||
} else
|
||||
rv = 1; /* unspecified source; tentatively allow */
|
||||
}
|
||||
|
@ -473,6 +473,8 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
if (csum && skb_checksum_start(skb) < skb->data)
return -EINVAL;
return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}

@ -586,28 +586,35 @@ static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
|
||||
}
|
||||
}
|
||||
|
||||
static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
|
||||
static void fnhe_remove_oldest(struct fnhe_hash_bucket *hash)
|
||||
{
|
||||
struct fib_nh_exception *fnhe, *oldest;
|
||||
struct fib_nh_exception __rcu **fnhe_p, **oldest_p;
|
||||
struct fib_nh_exception *fnhe, *oldest = NULL;
|
||||
|
||||
oldest = rcu_dereference(hash->chain);
|
||||
for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
|
||||
fnhe = rcu_dereference(fnhe->fnhe_next)) {
|
||||
if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
|
||||
for (fnhe_p = &hash->chain; ; fnhe_p = &fnhe->fnhe_next) {
|
||||
fnhe = rcu_dereference_protected(*fnhe_p,
|
||||
lockdep_is_held(&fnhe_lock));
|
||||
if (!fnhe)
|
||||
break;
|
||||
if (!oldest ||
|
||||
time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) {
|
||||
oldest = fnhe;
|
||||
oldest_p = fnhe_p;
|
||||
}
|
||||
}
|
||||
fnhe_flush_routes(oldest);
|
||||
return oldest;
|
||||
*oldest_p = oldest->fnhe_next;
|
||||
kfree_rcu(oldest, rcu);
|
||||
}
|
||||
|
||||
static inline u32 fnhe_hashfun(__be32 daddr)
|
||||
static u32 fnhe_hashfun(__be32 daddr)
|
||||
{
|
||||
static u32 fnhe_hashrnd __read_mostly;
|
||||
u32 hval;
|
||||
static siphash_key_t fnhe_hash_key __read_mostly;
|
||||
u64 hval;
|
||||
|
||||
net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
|
||||
hval = jhash_1word((__force u32)daddr, fnhe_hashrnd);
|
||||
return hash_32(hval, FNHE_HASH_SHIFT);
|
||||
net_get_random_once(&fnhe_hash_key, sizeof(fnhe_hash_key));
|
||||
hval = siphash_1u32((__force u32)daddr, &fnhe_hash_key);
|
||||
return hash_64(hval, FNHE_HASH_SHIFT);
|
||||
}
|
||||
|
||||
static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
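The fnhe_hashfun() change above swaps a jhash of the destination address for a keyed siphash, so remote peers can no longer predict which exception bucket a given address lands in. A minimal kernel-context sketch of the same pattern (not a standalone program; it assumes the siphash_1u32(), hash_64() and net_get_random_once() helpers used in the hunk, and the bucket-shift name below is made up):

#include <linux/siphash.h>
#include <linux/hash.h>
#include <linux/net.h>

#define DEMO_HASH_SHIFT 11	/* hypothetical stand-in for FNHE_HASH_SHIFT */

/* Pick a bucket for an IPv4 destination using a per-boot random 128-bit key. */
static u32 demo_pick_bucket(__be32 daddr)
{
	static siphash_key_t demo_key __read_mostly;
	u64 hval;

	net_get_random_once(&demo_key, sizeof(demo_key));
	hval = siphash_1u32((__force u32)daddr, &demo_key);
	return hash_64(hval, DEMO_HASH_SHIFT);
}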
|
||||
@ -676,16 +683,21 @@ static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
|
||||
if (rt)
|
||||
fill_route_from_fnhe(rt, fnhe);
|
||||
} else {
|
||||
if (depth > FNHE_RECLAIM_DEPTH)
|
||||
fnhe = fnhe_oldest(hash);
|
||||
else {
|
||||
fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
|
||||
if (!fnhe)
|
||||
goto out_unlock;
|
||||
/* Randomize max depth to avoid some side channels attacks. */
|
||||
int max_depth = FNHE_RECLAIM_DEPTH +
|
||||
prandom_u32_max(FNHE_RECLAIM_DEPTH);
|
||||
|
||||
fnhe->fnhe_next = hash->chain;
|
||||
rcu_assign_pointer(hash->chain, fnhe);
|
||||
while (depth > max_depth) {
|
||||
fnhe_remove_oldest(hash);
|
||||
depth--;
|
||||
}
|
||||
|
||||
fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
|
||||
if (!fnhe)
|
||||
goto out_unlock;
|
||||
|
||||
fnhe->fnhe_next = hash->chain;
|
||||
|
||||
fnhe->fnhe_genid = genid;
|
||||
fnhe->fnhe_daddr = daddr;
|
||||
fnhe->fnhe_gw = gw;
|
||||
@ -693,6 +705,8 @@ static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
|
||||
fnhe->fnhe_mtu_locked = lock;
|
||||
fnhe->fnhe_expires = max(1UL, expires);
|
||||
|
||||
rcu_assign_pointer(hash->chain, fnhe);
|
||||
|
||||
/* Exception created; mark the cached routes for the nexthop
|
||||
* stale, so anyone caching it rechecks if this exception
|
||||
* applies to them.
|
||||
@ -3170,7 +3184,7 @@ static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
|
||||
udph = skb_put_zero(skb, sizeof(struct udphdr));
|
||||
udph->source = sport;
|
||||
udph->dest = dport;
|
||||
udph->len = sizeof(struct udphdr);
|
||||
udph->len = htons(sizeof(struct udphdr));
|
||||
udph->check = 0;
|
||||
break;
|
||||
}
|
||||
|
@ -2451,6 +2451,7 @@ static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
|
||||
static void *tcp_seek_last_pos(struct seq_file *seq)
|
||||
{
|
||||
struct tcp_iter_state *st = seq->private;
|
||||
int bucket = st->bucket;
|
||||
int offset = st->offset;
|
||||
int orig_num = st->num;
|
||||
void *rc = NULL;
|
||||
@ -2461,7 +2462,7 @@ static void *tcp_seek_last_pos(struct seq_file *seq)
|
||||
break;
|
||||
st->state = TCP_SEQ_STATE_LISTENING;
|
||||
rc = listening_get_next(seq, NULL);
|
||||
while (offset-- && rc)
|
||||
while (offset-- && rc && bucket == st->bucket)
|
||||
rc = listening_get_next(seq, rc);
|
||||
if (rc)
|
||||
break;
|
||||
@ -2472,7 +2473,7 @@ static void *tcp_seek_last_pos(struct seq_file *seq)
|
||||
if (st->bucket > tcp_hashinfo.ehash_mask)
|
||||
break;
|
||||
rc = established_get_first(seq);
|
||||
while (offset-- && rc)
|
||||
while (offset-- && rc && bucket == st->bucket)
|
||||
rc = established_get_next(seq, rc);
|
||||
}
|
||||
|
||||
|
@ -1341,7 +1341,7 @@ static void __fib6_update_sernum_upto_root(struct fib6_info *rt,
struct fib6_node *fn = rcu_dereference_protected(rt->fib6_node,
lockdep_is_held(&rt->fib6_table->tb6_lock));

/* paired with smp_rmb() in rt6_get_cookie_safe() */
/* paired with smp_rmb() in fib6_get_cookie_safe() */
smp_wmb();
while (fn) {
fn->fn_sernum = sernum;

@ -629,6 +629,8 @@ static int gre_rcv(struct sk_buff *skb)

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
if (csum && skb_checksum_start(skb) < skb->data)
return -EINVAL;
return iptunnel_handle_offloads(skb,
csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}

@ -41,6 +41,7 @@
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/siphash.h>
#include <net/net_namespace.h>
#include <net/snmp.h>
#include <net/ipv6.h>
@ -1484,17 +1485,24 @@ static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
static u32 rt6_exception_hash(const struct in6_addr *dst,
const struct in6_addr *src)
{
static u32 seed __read_mostly;
u32 val;
static siphash_key_t rt6_exception_key __read_mostly;
struct {
struct in6_addr dst;
struct in6_addr src;
} __aligned(SIPHASH_ALIGNMENT) combined = {
.dst = *dst,
};
u64 val;

net_get_random_once(&seed, sizeof(seed));
val = jhash2((const u32 *)dst, sizeof(*dst)/sizeof(u32), seed);
net_get_random_once(&rt6_exception_key, sizeof(rt6_exception_key));

#ifdef CONFIG_IPV6_SUBTREES
if (src)
val = jhash2((const u32 *)src, sizeof(*src)/sizeof(u32), val);
combined.src = *src;
#endif
return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
val = siphash(&combined, sizeof(combined), &rt6_exception_key);

return hash_64(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
}

/* Helper function to find the cached rt in the hash table
|
||||
@ -1649,6 +1657,7 @@ static int rt6_insert_exception(struct rt6_info *nrt,
|
||||
struct in6_addr *src_key = NULL;
|
||||
struct rt6_exception *rt6_ex;
|
||||
struct fib6_nh *nh = res->nh;
|
||||
int max_depth;
|
||||
int err = 0;
|
||||
|
||||
spin_lock_bh(&rt6_exception_lock);
|
||||
@ -1703,7 +1712,9 @@ static int rt6_insert_exception(struct rt6_info *nrt,
bucket->depth++;
net->ipv6.rt6_stats->fib_rt_cache++;

if (bucket->depth > FIB6_MAX_DEPTH)
/* Randomize max depth to avoid some side channels attacks. */
max_depth = FIB6_MAX_DEPTH + prandom_u32_max(FIB6_MAX_DEPTH);
while (bucket->depth > max_depth)
rt6_exception_remove_oldest(bucket);

out:

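For the "Randomize max depth" comment in the hunk above: prandom_u32_max(n) returns a uniform value in [0, n - 1], so FIB6_MAX_DEPTH + prandom_u32_max(FIB6_MAX_DEPTH) yields a per-insertion eviction limit anywhere in [FIB6_MAX_DEPTH, 2 * FIB6_MAX_DEPTH - 1], which keeps an observer from knowing exactly when the oldest exception is dropped. A small stand-alone illustration (ordinary rand() stands in for the kernel helper):

#include <stdio.h>
#include <stdlib.h>

#define DEMO_MAX_DEPTH 5	/* stand-in for FIB6_MAX_DEPTH */

/* rand() % n only illustrates prandom_u32_max(n), which is uniform in [0, n - 1]. */
static unsigned int demo_randomized_limit(void)
{
	return DEMO_MAX_DEPTH + (unsigned int)(rand() % DEMO_MAX_DEPTH);
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("max_depth = %u\n", demo_randomized_limit()); /* always 5..9 */
	return 0;
}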
@ -260,6 +260,8 @@ static void ieee80211_restart_work(struct work_struct *work)
|
||||
flush_work(&local->radar_detected_work);
|
||||
|
||||
rtnl_lock();
|
||||
/* we might do interface manipulations, so need both */
|
||||
wiphy_lock(local->hw.wiphy);
|
||||
|
||||
WARN(test_bit(SCAN_HW_SCANNING, &local->scanning),
|
||||
"%s called with hardware scan in progress\n", __func__);
|
||||
@ -1018,7 +1020,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
|
||||
|
||||
iftd = &sband->iftype_data[i];
|
||||
|
||||
supp_he = supp_he || (iftd && iftd->he_cap.has_he);
|
||||
supp_he = supp_he || iftd->he_cap.has_he;
|
||||
}
|
||||
|
||||
/* HT, VHT, HE require QoS, thus >= 4 queues */
|
||||
|
@ -3242,7 +3242,9 @@ static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata,
|
||||
if (info->control.flags & IEEE80211_TX_CTRL_AMSDU)
|
||||
return true;
|
||||
|
||||
if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr)))
|
||||
if (!ieee80211_amsdu_realloc_pad(local, skb,
|
||||
sizeof(*amsdu_hdr) +
|
||||
local->hw.extra_tx_headroom))
|
||||
return false;
|
||||
|
||||
data = skb_push(skb, sizeof(*amsdu_hdr));
|
||||
|
@ -885,20 +885,16 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
|
||||
return subflow->mp_capable;
|
||||
}
|
||||
|
||||
if (mp_opt->dss && mp_opt->use_ack) {
|
||||
if ((mp_opt->dss && mp_opt->use_ack) ||
|
||||
(mp_opt->add_addr && !mp_opt->echo)) {
|
||||
/* subflows are fully established as soon as we get any
|
||||
* additional ack.
|
||||
* additional ack, including ADD_ADDR.
|
||||
*/
|
||||
subflow->fully_established = 1;
|
||||
WRITE_ONCE(msk->fully_established, true);
|
||||
goto fully_established;
|
||||
}
|
||||
|
||||
if (mp_opt->add_addr) {
|
||||
WRITE_ONCE(msk->fully_established, true);
|
||||
return true;
|
||||
}
|
||||
|
||||
/* If the first established packet does not contain MP_CAPABLE + data
|
||||
* then fallback to TCP. Fallback scenarios requires a reset for
|
||||
* MP_JOIN subflows.
|
||||
|
@ -1135,36 +1135,12 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct addr_entry_release_work {
|
||||
struct rcu_work rwork;
|
||||
struct mptcp_pm_addr_entry *entry;
|
||||
};
|
||||
|
||||
static void mptcp_pm_release_addr_entry(struct work_struct *work)
|
||||
/* caller must ensure the RCU grace period is already elapsed */
|
||||
static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry)
|
||||
{
|
||||
struct addr_entry_release_work *w;
|
||||
struct mptcp_pm_addr_entry *entry;
|
||||
|
||||
w = container_of(to_rcu_work(work), struct addr_entry_release_work, rwork);
|
||||
entry = w->entry;
|
||||
if (entry) {
|
||||
if (entry->lsk)
|
||||
sock_release(entry->lsk);
|
||||
kfree(entry);
|
||||
}
|
||||
kfree(w);
|
||||
}
|
||||
|
||||
static void mptcp_pm_free_addr_entry(struct mptcp_pm_addr_entry *entry)
|
||||
{
|
||||
struct addr_entry_release_work *w;
|
||||
|
||||
w = kmalloc(sizeof(*w), GFP_ATOMIC);
|
||||
if (w) {
|
||||
INIT_RCU_WORK(&w->rwork, mptcp_pm_release_addr_entry);
|
||||
w->entry = entry;
|
||||
queue_rcu_work(system_wq, &w->rwork);
|
||||
}
|
||||
if (entry->lsk)
|
||||
sock_release(entry->lsk);
|
||||
kfree(entry);
|
||||
}
|
||||
|
||||
static int mptcp_nl_remove_id_zero_address(struct net *net,
|
||||
@ -1244,7 +1220,8 @@ static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info)
|
||||
spin_unlock_bh(&pernet->lock);
|
||||
|
||||
mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), &entry->addr);
|
||||
mptcp_pm_free_addr_entry(entry);
|
||||
synchronize_rcu();
|
||||
__mptcp_pm_release_addr_entry(entry);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -1297,6 +1274,7 @@ static void mptcp_nl_remove_addrs_list(struct net *net,
|
||||
}
|
||||
}
|
||||
|
||||
/* caller must ensure the RCU grace period is already elapsed */
|
||||
static void __flush_addrs(struct list_head *list)
|
||||
{
|
||||
while (!list_empty(list)) {
|
||||
@ -1305,7 +1283,7 @@ static void __flush_addrs(struct list_head *list)
|
||||
cur = list_entry(list->next,
|
||||
struct mptcp_pm_addr_entry, list);
|
||||
list_del_rcu(&cur->list);
|
||||
mptcp_pm_free_addr_entry(cur);
|
||||
__mptcp_pm_release_addr_entry(cur);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1329,6 +1307,7 @@ static int mptcp_nl_cmd_flush_addrs(struct sk_buff *skb, struct genl_info *info)
|
||||
bitmap_zero(pernet->id_bitmap, MAX_ADDR_ID + 1);
|
||||
spin_unlock_bh(&pernet->lock);
|
||||
mptcp_nl_remove_addrs_list(sock_net(skb->sk), &free_list);
|
||||
synchronize_rcu();
|
||||
__flush_addrs(&free_list);
|
||||
return 0;
|
||||
}
|
||||
@ -1939,7 +1918,8 @@ static void __net_exit pm_nl_exit_net(struct list_head *net_list)
|
||||
struct pm_nl_pernet *pernet = net_generic(net, pm_nl_pernet_id);
|
||||
|
||||
/* net is removed from namespace list, can't race with
|
||||
* other modifiers
|
||||
* other modifiers, also netns core already waited for a
|
||||
* RCU grace period.
|
||||
*/
|
||||
__flush_addrs(&pernet->local_addr_list);
|
||||
}
|
||||
|
@ -1,11 +1,8 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/* x_tables module for setting the IPv4/IPv6 DSCP field, Version 1.8
|
||||
/* IP tables module for matching the value of the IPv4/IPv6 DSCP field
|
||||
*
|
||||
* (C) 2002 by Harald Welte <laforge@netfilter.org>
|
||||
* based on ipt_FTOS.c (C) 2000 by Matthew G. Marsh <mgm@paktronix.com>
|
||||
*
|
||||
* See RFC2474 for a description of the DSCP field within the IP Header.
|
||||
*/
|
||||
*/
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
#include <linux/module.h>
|
||||
#include <linux/skbuff.h>
|
||||
@ -14,148 +11,100 @@
|
||||
#include <net/dsfield.h>
|
||||
|
||||
#include <linux/netfilter/x_tables.h>
|
||||
#include <linux/netfilter/xt_DSCP.h>
|
||||
#include <linux/netfilter/xt_dscp.h>
|
||||
|
||||
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
|
||||
MODULE_DESCRIPTION("Xtables: DSCP/TOS field modification");
|
||||
MODULE_DESCRIPTION("Xtables: DSCP/TOS field match");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_ALIAS("ipt_DSCP");
|
||||
MODULE_ALIAS("ip6t_DSCP");
|
||||
MODULE_ALIAS("ipt_TOS");
|
||||
MODULE_ALIAS("ip6t_TOS");
|
||||
MODULE_ALIAS("ipt_dscp");
|
||||
MODULE_ALIAS("ip6t_dscp");
|
||||
MODULE_ALIAS("ipt_tos");
|
||||
MODULE_ALIAS("ip6t_tos");
|
||||
|
||||
static unsigned int
|
||||
dscp_tg(struct sk_buff *skb, const struct xt_action_param *par)
|
||||
static bool
|
||||
dscp_mt(const struct sk_buff *skb, struct xt_action_param *par)
|
||||
{
|
||||
const struct xt_DSCP_info *dinfo = par->targinfo;
|
||||
const struct xt_dscp_info *info = par->matchinfo;
|
||||
u_int8_t dscp = ipv4_get_dsfield(ip_hdr(skb)) >> XT_DSCP_SHIFT;
|
||||
|
||||
if (dscp != dinfo->dscp) {
|
||||
if (skb_ensure_writable(skb, sizeof(struct iphdr)))
|
||||
return NF_DROP;
|
||||
|
||||
ipv4_change_dsfield(ip_hdr(skb),
|
||||
(__force __u8)(~XT_DSCP_MASK),
|
||||
dinfo->dscp << XT_DSCP_SHIFT);
|
||||
|
||||
}
|
||||
return XT_CONTINUE;
|
||||
return (dscp == info->dscp) ^ !!info->invert;
|
||||
}
|
||||
|
||||
static unsigned int
|
||||
dscp_tg6(struct sk_buff *skb, const struct xt_action_param *par)
|
||||
static bool
|
||||
dscp_mt6(const struct sk_buff *skb, struct xt_action_param *par)
|
||||
{
|
||||
const struct xt_DSCP_info *dinfo = par->targinfo;
|
||||
const struct xt_dscp_info *info = par->matchinfo;
|
||||
u_int8_t dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> XT_DSCP_SHIFT;
|
||||
|
||||
if (dscp != dinfo->dscp) {
|
||||
if (skb_ensure_writable(skb, sizeof(struct ipv6hdr)))
|
||||
return NF_DROP;
|
||||
|
||||
ipv6_change_dsfield(ipv6_hdr(skb),
|
||||
(__force __u8)(~XT_DSCP_MASK),
|
||||
dinfo->dscp << XT_DSCP_SHIFT);
|
||||
}
|
||||
return XT_CONTINUE;
|
||||
return (dscp == info->dscp) ^ !!info->invert;
|
||||
}
|
||||
|
||||
static int dscp_tg_check(const struct xt_tgchk_param *par)
|
||||
static int dscp_mt_check(const struct xt_mtchk_param *par)
|
||||
{
|
||||
const struct xt_DSCP_info *info = par->targinfo;
|
||||
const struct xt_dscp_info *info = par->matchinfo;
|
||||
|
||||
if (info->dscp > XT_DSCP_MAX)
|
||||
return -EDOM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned int
|
||||
tos_tg(struct sk_buff *skb, const struct xt_action_param *par)
|
||||
static bool tos_mt(const struct sk_buff *skb, struct xt_action_param *par)
|
||||
{
|
||||
const struct xt_tos_target_info *info = par->targinfo;
|
||||
struct iphdr *iph = ip_hdr(skb);
|
||||
u_int8_t orig, nv;
|
||||
const struct xt_tos_match_info *info = par->matchinfo;
|
||||
|
||||
orig = ipv4_get_dsfield(iph);
|
||||
nv = (orig & ~info->tos_mask) ^ info->tos_value;
|
||||
|
||||
if (orig != nv) {
|
||||
if (skb_ensure_writable(skb, sizeof(struct iphdr)))
|
||||
return NF_DROP;
|
||||
iph = ip_hdr(skb);
|
||||
ipv4_change_dsfield(iph, 0, nv);
|
||||
}
|
||||
|
||||
return XT_CONTINUE;
|
||||
if (xt_family(par) == NFPROTO_IPV4)
|
||||
return ((ip_hdr(skb)->tos & info->tos_mask) ==
|
||||
info->tos_value) ^ !!info->invert;
|
||||
else
|
||||
return ((ipv6_get_dsfield(ipv6_hdr(skb)) & info->tos_mask) ==
|
||||
info->tos_value) ^ !!info->invert;
|
||||
}
|
||||
|
||||
static unsigned int
|
||||
tos_tg6(struct sk_buff *skb, const struct xt_action_param *par)
|
||||
{
|
||||
const struct xt_tos_target_info *info = par->targinfo;
|
||||
struct ipv6hdr *iph = ipv6_hdr(skb);
|
||||
u_int8_t orig, nv;
|
||||
|
||||
orig = ipv6_get_dsfield(iph);
|
||||
nv = (orig & ~info->tos_mask) ^ info->tos_value;
|
||||
|
||||
if (orig != nv) {
|
||||
if (skb_ensure_writable(skb, sizeof(struct iphdr)))
|
||||
return NF_DROP;
|
||||
iph = ipv6_hdr(skb);
|
||||
ipv6_change_dsfield(iph, 0, nv);
|
||||
}
|
||||
|
||||
return XT_CONTINUE;
|
||||
}
|
||||
|
||||
static struct xt_target dscp_tg_reg[] __read_mostly = {
|
||||
static struct xt_match dscp_mt_reg[] __read_mostly = {
|
||||
{
|
||||
.name = "DSCP",
|
||||
.name = "dscp",
|
||||
.family = NFPROTO_IPV4,
|
||||
.checkentry = dscp_tg_check,
|
||||
.target = dscp_tg,
|
||||
.targetsize = sizeof(struct xt_DSCP_info),
|
||||
.table = "mangle",
|
||||
.checkentry = dscp_mt_check,
|
||||
.match = dscp_mt,
|
||||
.matchsize = sizeof(struct xt_dscp_info),
|
||||
.me = THIS_MODULE,
|
||||
},
|
||||
{
|
||||
.name = "DSCP",
|
||||
.name = "dscp",
|
||||
.family = NFPROTO_IPV6,
|
||||
.checkentry = dscp_tg_check,
|
||||
.target = dscp_tg6,
|
||||
.targetsize = sizeof(struct xt_DSCP_info),
|
||||
.table = "mangle",
|
||||
.checkentry = dscp_mt_check,
|
||||
.match = dscp_mt6,
|
||||
.matchsize = sizeof(struct xt_dscp_info),
|
||||
.me = THIS_MODULE,
|
||||
},
|
||||
{
|
||||
.name = "TOS",
|
||||
.name = "tos",
|
||||
.revision = 1,
|
||||
.family = NFPROTO_IPV4,
|
||||
.table = "mangle",
|
||||
.target = tos_tg,
|
||||
.targetsize = sizeof(struct xt_tos_target_info),
|
||||
.match = tos_mt,
|
||||
.matchsize = sizeof(struct xt_tos_match_info),
|
||||
.me = THIS_MODULE,
|
||||
},
|
||||
{
|
||||
.name = "TOS",
|
||||
.name = "tos",
|
||||
.revision = 1,
|
||||
.family = NFPROTO_IPV6,
|
||||
.table = "mangle",
|
||||
.target = tos_tg6,
|
||||
.targetsize = sizeof(struct xt_tos_target_info),
|
||||
.match = tos_mt,
|
||||
.matchsize = sizeof(struct xt_tos_match_info),
|
||||
.me = THIS_MODULE,
|
||||
},
|
||||
};
|
||||
|
||||
static int __init dscp_tg_init(void)
|
||||
static int __init dscp_mt_init(void)
|
||||
{
|
||||
return xt_register_targets(dscp_tg_reg, ARRAY_SIZE(dscp_tg_reg));
|
||||
return xt_register_matches(dscp_mt_reg, ARRAY_SIZE(dscp_mt_reg));
|
||||
}
|
||||
|
||||
static void __exit dscp_tg_exit(void)
|
||||
static void __exit dscp_mt_exit(void)
|
||||
{
|
||||
xt_unregister_targets(dscp_tg_reg, ARRAY_SIZE(dscp_tg_reg));
|
||||
xt_unregister_matches(dscp_mt_reg, ARRAY_SIZE(dscp_mt_reg));
|
||||
}
|
||||
|
||||
module_init(dscp_tg_init);
|
||||
module_exit(dscp_tg_exit);
|
||||
module_init(dscp_mt_init);
|
||||
module_exit(dscp_mt_exit);
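Every converted match above ends in the same (test) ^ !!info->invert idiom: XOR with the user-supplied invert flag turns a hit into a miss and vice versa. A tiny stand-alone illustration (the names here are invented for the example):

#include <stdbool.h>
#include <stdio.h>

/* XOR with the invert flag flips the verdict when inversion is requested. */
static bool demo_dscp_match(unsigned char dscp, unsigned char wanted, bool invert)
{
	return (dscp == wanted) ^ invert;
}

int main(void)
{
	printf("%d %d\n", demo_dscp_match(0x2e, 0x2e, false),	/* 1: match */
			  demo_dscp_match(0x2e, 0x2e, true));	/* 0: inverted */
	return 0;
}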
|
||||
|
@ -187,14 +187,14 @@ static int netlbl_cipsov4_add_std(struct genl_info *info,
|
||||
}
|
||||
doi_def->map.std->lvl.local = kcalloc(doi_def->map.std->lvl.local_size,
|
||||
sizeof(u32),
|
||||
GFP_KERNEL);
|
||||
GFP_KERNEL | __GFP_NOWARN);
|
||||
if (doi_def->map.std->lvl.local == NULL) {
|
||||
ret_val = -ENOMEM;
|
||||
goto add_std_failure;
|
||||
}
|
||||
doi_def->map.std->lvl.cipso = kcalloc(doi_def->map.std->lvl.cipso_size,
|
||||
sizeof(u32),
|
||||
GFP_KERNEL);
|
||||
GFP_KERNEL | __GFP_NOWARN);
|
||||
if (doi_def->map.std->lvl.cipso == NULL) {
|
||||
ret_val = -ENOMEM;
|
||||
goto add_std_failure;
|
||||
@ -263,7 +263,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info,
|
||||
doi_def->map.std->cat.local = kcalloc(
|
||||
doi_def->map.std->cat.local_size,
|
||||
sizeof(u32),
|
||||
GFP_KERNEL);
|
||||
GFP_KERNEL | __GFP_NOWARN);
|
||||
if (doi_def->map.std->cat.local == NULL) {
|
||||
ret_val = -ENOMEM;
|
||||
goto add_std_failure;
|
||||
@ -271,7 +271,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info,
|
||||
doi_def->map.std->cat.cipso = kcalloc(
|
||||
doi_def->map.std->cat.cipso_size,
|
||||
sizeof(u32),
|
||||
GFP_KERNEL);
|
||||
GFP_KERNEL | __GFP_NOWARN);
|
||||
if (doi_def->map.std->cat.cipso == NULL) {
|
||||
ret_val = -ENOMEM;
|
||||
goto add_std_failure;
|
||||
|
@ -507,6 +507,7 @@ void ovs_vport_send(struct vport *vport, struct sk_buff *skb, u8 mac_proto)
}

skb->dev = vport->dev;
skb->tstamp = 0;
vport->ops->send(skb);
return;

|
@ -15,7 +15,6 @@ struct qrtr_mhi_dev {
|
||||
struct qrtr_endpoint ep;
|
||||
struct mhi_device *mhi_dev;
|
||||
struct device *dev;
|
||||
struct completion ready;
|
||||
};
|
||||
|
||||
/* From MHI to QRTR */
|
||||
@ -51,10 +50,6 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
|
||||
struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep);
|
||||
int rc;
|
||||
|
||||
rc = wait_for_completion_interruptible(&qdev->ready);
|
||||
if (rc)
|
||||
goto free_skb;
|
||||
|
||||
if (skb->sk)
|
||||
sock_hold(skb->sk);
|
||||
|
||||
@ -84,7 +79,7 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
|
||||
int rc;
|
||||
|
||||
/* start channels */
|
||||
rc = mhi_prepare_for_transfer(mhi_dev, 0);
|
||||
rc = mhi_prepare_for_transfer(mhi_dev);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
@ -101,15 +96,6 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
/* start channels */
|
||||
rc = mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
|
||||
if (rc) {
|
||||
qrtr_endpoint_unregister(&qdev->ep);
|
||||
dev_set_drvdata(&mhi_dev->dev, NULL);
|
||||
return rc;
|
||||
}
|
||||
|
||||
complete_all(&qdev->ready);
|
||||
dev_dbg(qdev->dev, "Qualcomm MHI QRTR driver probed\n");
|
||||
|
||||
return 0;
|
||||
|
@ -493,7 +493,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (len != ALIGN(size, 4) + hdrlen)
|
||||
if (!size || size & 3 || len != size + hdrlen)
|
||||
goto err;
|
||||
|
||||
if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA &&
|
||||
@ -506,8 +506,12 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
|
||||
|
||||
if (cb->type == QRTR_TYPE_NEW_SERVER) {
|
||||
/* Remote node endpoint can bridge other distant nodes */
|
||||
const struct qrtr_ctrl_pkt *pkt = data + hdrlen;
|
||||
const struct qrtr_ctrl_pkt *pkt;
|
||||
|
||||
if (size < sizeof(*pkt))
|
||||
goto err;
|
||||
|
||||
pkt = data + hdrlen;
|
||||
qrtr_node_assign(node, le32_to_cpu(pkt->server.node));
|
||||
}
|
||||
|
||||
|
@ -131,9 +131,9 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
|
||||
cpu_relax();
|
||||
}
|
||||
|
||||
ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len,
|
||||
ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len,
|
||||
&off, PAGE_SIZE);
|
||||
if (unlikely(ret != ibmr->sg_len))
|
||||
if (unlikely(ret != ibmr->sg_dma_len))
|
||||
return ret < 0 ? ret : -EINVAL;
|
||||
|
||||
if (cmpxchg(&frmr->fr_state,
|
||||
|
@ -720,7 +720,7 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
skip_hash:
if (flow_override)
flow_hash = flow_override - 1;
else if (use_skbhash)
else if (use_skbhash && (flow_mode & CAKE_FLOW_FLOWS))
flow_hash = skb->hash;
if (host_override) {
dsthost_hash = host_override - 1;
|
@ -1614,7 +1614,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
|
||||
err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
|
||||
if (err) {
|
||||
kfree(cl);
|
||||
return err;
|
||||
goto failure;
|
||||
}
|
||||
|
||||
if (tca[TCA_RATE]) {
|
||||
|
@ -660,6 +660,13 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
|
||||
sch_tree_lock(sch);
|
||||
|
||||
q->nbands = nbands;
|
||||
for (i = nstrict; i < q->nstrict; i++) {
|
||||
INIT_LIST_HEAD(&q->classes[i].alist);
|
||||
if (q->classes[i].qdisc->q.qlen) {
|
||||
list_add_tail(&q->classes[i].alist, &q->active);
|
||||
q->classes[i].deficit = quanta[i];
|
||||
}
|
||||
}
|
||||
q->nstrict = nstrict;
|
||||
memcpy(q->prio2band, priomap, sizeof(priomap));
|
||||
|
||||
|
@ -125,6 +125,7 @@ struct htb_class {
|
||||
struct htb_class_leaf {
|
||||
int deficit[TC_HTB_MAXDEPTH];
|
||||
struct Qdisc *q;
|
||||
struct netdev_queue *offload_queue;
|
||||
} leaf;
|
||||
struct htb_class_inner {
|
||||
struct htb_prio clprio[TC_HTB_NUMPRIO];
|
||||
@ -1411,24 +1412,47 @@ htb_graft_helper(struct netdev_queue *dev_queue, struct Qdisc *new_q)
|
||||
return old_q;
|
||||
}
|
||||
|
||||
static void htb_offload_move_qdisc(struct Qdisc *sch, u16 qid_old, u16 qid_new)
|
||||
static struct netdev_queue *htb_offload_get_queue(struct htb_class *cl)
|
||||
{
|
||||
struct netdev_queue *queue;
|
||||
|
||||
queue = cl->leaf.offload_queue;
|
||||
if (!(cl->leaf.q->flags & TCQ_F_BUILTIN))
|
||||
WARN_ON(cl->leaf.q->dev_queue != queue);
|
||||
|
||||
return queue;
|
||||
}
|
||||
|
||||
static void htb_offload_move_qdisc(struct Qdisc *sch, struct htb_class *cl_old,
|
||||
struct htb_class *cl_new, bool destroying)
|
||||
{
|
||||
struct netdev_queue *queue_old, *queue_new;
|
||||
struct net_device *dev = qdisc_dev(sch);
|
||||
struct Qdisc *qdisc;
|
||||
|
||||
queue_old = netdev_get_tx_queue(dev, qid_old);
|
||||
queue_new = netdev_get_tx_queue(dev, qid_new);
|
||||
queue_old = htb_offload_get_queue(cl_old);
|
||||
queue_new = htb_offload_get_queue(cl_new);
|
||||
|
||||
if (dev->flags & IFF_UP)
|
||||
dev_deactivate(dev);
|
||||
qdisc = dev_graft_qdisc(queue_old, NULL);
|
||||
qdisc->dev_queue = queue_new;
|
||||
qdisc = dev_graft_qdisc(queue_new, qdisc);
|
||||
if (dev->flags & IFF_UP)
|
||||
dev_activate(dev);
|
||||
if (!destroying) {
|
||||
struct Qdisc *qdisc;
|
||||
|
||||
WARN_ON(!(qdisc->flags & TCQ_F_BUILTIN));
|
||||
if (dev->flags & IFF_UP)
|
||||
dev_deactivate(dev);
|
||||
qdisc = dev_graft_qdisc(queue_old, NULL);
|
||||
WARN_ON(qdisc != cl_old->leaf.q);
|
||||
}
|
||||
|
||||
if (!(cl_old->leaf.q->flags & TCQ_F_BUILTIN))
|
||||
cl_old->leaf.q->dev_queue = queue_new;
|
||||
cl_old->leaf.offload_queue = queue_new;
|
||||
|
||||
if (!destroying) {
|
||||
struct Qdisc *qdisc;
|
||||
|
||||
qdisc = dev_graft_qdisc(queue_new, cl_old->leaf.q);
|
||||
if (dev->flags & IFF_UP)
|
||||
dev_activate(dev);
|
||||
WARN_ON(!(qdisc->flags & TCQ_F_BUILTIN));
|
||||
}
|
||||
}
|
||||
|
||||
static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
|
||||
@ -1442,10 +1466,8 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
|
||||
if (cl->level)
|
||||
return -EINVAL;
|
||||
|
||||
if (q->offload) {
|
||||
dev_queue = new->dev_queue;
|
||||
WARN_ON(dev_queue != cl->leaf.q->dev_queue);
|
||||
}
|
||||
if (q->offload)
|
||||
dev_queue = htb_offload_get_queue(cl);
|
||||
|
||||
if (!new) {
|
||||
new = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
|
||||
@ -1514,6 +1536,8 @@ static void htb_parent_to_leaf(struct Qdisc *sch, struct htb_class *cl,
|
||||
parent->ctokens = parent->cbuffer;
|
||||
parent->t_c = ktime_get_ns();
|
||||
parent->cmode = HTB_CAN_SEND;
|
||||
if (q->offload)
|
||||
parent->leaf.offload_queue = cl->leaf.offload_queue;
|
||||
}
|
||||
|
||||
static void htb_parent_to_leaf_offload(struct Qdisc *sch,
|
||||
@ -1534,6 +1558,7 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct tc_htb_qopt_offload offload_opt;
|
||||
struct netdev_queue *dev_queue;
|
||||
struct Qdisc *q = cl->leaf.q;
|
||||
struct Qdisc *old = NULL;
|
||||
int err;
|
||||
@ -1542,16 +1567,15 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
|
||||
return -EINVAL;
|
||||
|
||||
WARN_ON(!q);
|
||||
if (!destroying) {
|
||||
/* On destroy of HTB, two cases are possible:
|
||||
* 1. q is a normal qdisc, but q->dev_queue has noop qdisc.
|
||||
* 2. q is a noop qdisc (for nodes that were inner),
|
||||
* q->dev_queue is noop_netdev_queue.
|
||||
dev_queue = htb_offload_get_queue(cl);
|
||||
old = htb_graft_helper(dev_queue, NULL);
|
||||
if (destroying)
|
||||
/* Before HTB is destroyed, the kernel grafts noop_qdisc to
|
||||
* all queues.
|
||||
*/
|
||||
old = htb_graft_helper(q->dev_queue, NULL);
|
||||
WARN_ON(!old);
|
||||
WARN_ON(!(old->flags & TCQ_F_BUILTIN));
|
||||
else
|
||||
WARN_ON(old != q);
|
||||
}
|
||||
|
||||
if (cl->parent) {
|
||||
cl->parent->bstats_bias.bytes += q->bstats.bytes;
|
||||
@ -1570,18 +1594,17 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
|
||||
if (!err || destroying)
|
||||
qdisc_put(old);
|
||||
else
|
||||
htb_graft_helper(q->dev_queue, old);
|
||||
htb_graft_helper(dev_queue, old);
|
||||
|
||||
if (last_child)
|
||||
return err;
|
||||
|
||||
if (!err && offload_opt.moved_qid != 0) {
|
||||
if (destroying)
|
||||
q->dev_queue = netdev_get_tx_queue(qdisc_dev(sch),
|
||||
offload_opt.qid);
|
||||
else
|
||||
htb_offload_move_qdisc(sch, offload_opt.moved_qid,
|
||||
offload_opt.qid);
|
||||
if (!err && offload_opt.classid != TC_H_MIN(cl->common.classid)) {
|
||||
u32 classid = TC_H_MAJ(sch->handle) |
|
||||
TC_H_MIN(offload_opt.classid);
|
||||
struct htb_class *moved_cl = htb_find(classid, sch);
|
||||
|
||||
htb_offload_move_qdisc(sch, moved_cl, cl, destroying);
|
||||
}
|
||||
|
||||
return err;
|
||||
@ -1704,9 +1727,11 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
|
||||
}
|
||||
|
||||
if (last_child) {
|
||||
struct netdev_queue *dev_queue;
|
||||
struct netdev_queue *dev_queue = sch->dev_queue;
|
||||
|
||||
if (q->offload)
|
||||
dev_queue = htb_offload_get_queue(cl);
|
||||
|
||||
dev_queue = q->offload ? cl->leaf.q->dev_queue : sch->dev_queue;
|
||||
new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
|
||||
cl->parent->common.classid,
|
||||
NULL);
|
||||
@ -1878,7 +1903,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
|
||||
}
|
||||
dev_queue = netdev_get_tx_queue(dev, offload_opt.qid);
|
||||
} else { /* First child. */
|
||||
dev_queue = parent->leaf.q->dev_queue;
|
||||
dev_queue = htb_offload_get_queue(parent);
|
||||
old_q = htb_graft_helper(dev_queue, NULL);
|
||||
WARN_ON(old_q != parent->leaf.q);
|
||||
offload_opt = (struct tc_htb_qopt_offload) {
|
||||
@ -1935,6 +1960,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
|
||||
|
||||
/* leaf (we) needs elementary qdisc */
|
||||
cl->leaf.q = new_q ? new_q : &noop_qdisc;
|
||||
if (q->offload)
|
||||
cl->leaf.offload_queue = dev_queue;
|
||||
|
||||
cl->parent = parent;
|
||||
|
||||
|
@ -1109,7 +1109,7 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
|
||||
rtnl_unlock();
|
||||
if (!err && copy_to_user(argp, &ifc, sizeof(struct ifconf)))
|
||||
err = -EFAULT;
|
||||
} else {
|
||||
} else if (is_socket_ioctl_cmd(cmd)) {
|
||||
struct ifreq ifr;
|
||||
bool need_copyout;
|
||||
if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
|
||||
@ -1118,6 +1118,8 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
|
||||
if (!err && need_copyout)
|
||||
if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
|
||||
return -EFAULT;
|
||||
} else {
|
||||
err = -ENOTTY;
|
||||
}
|
||||
return err;
|
||||
}
|
||||
@ -3306,6 +3308,8 @@ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd,
|
||||
struct ifreq ifreq;
|
||||
u32 data32;
|
||||
|
||||
if (!is_socket_ioctl_cmd(cmd))
|
||||
return -ENOTTY;
|
||||
if (copy_from_user(ifreq.ifr_name, u_ifreq32->ifr_name, IFNAMSIZ))
|
||||
return -EFAULT;
|
||||
if (get_user(data32, &u_ifreq32->ifr_data))
|
||||
|
@ -1629,6 +1629,21 @@ u32 svc_max_payload(const struct svc_rqst *rqstp)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(svc_max_payload);
|
||||
|
||||
/**
|
||||
* svc_proc_name - Return RPC procedure name in string form
|
||||
* @rqstp: svc_rqst to operate on
|
||||
*
|
||||
* Return value:
|
||||
* Pointer to a NUL-terminated string
|
||||
*/
|
||||
const char *svc_proc_name(const struct svc_rqst *rqstp)
|
||||
{
|
||||
if (rqstp && rqstp->rq_procinfo)
|
||||
return rqstp->rq_procinfo->pc_name;
|
||||
return "unknown";
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* svc_encode_result_payload - mark a range of bytes as a result payload
|
||||
* @rqstp: svc_rqst to operate on
|
||||
|
@ -835,7 +835,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
|
||||
rqstp->rq_stime = ktime_get();
|
||||
rqstp->rq_reserved = serv->sv_max_mesg;
|
||||
atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
|
||||
}
|
||||
} else
|
||||
svc_xprt_received(xprt);
|
||||
out:
|
||||
trace_svc_handle_xprt(xprt, len);
|
||||
return len;
|
||||
|
@ -1518,7 +1518,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
|
||||
|
||||
if (unlikely(syn && !rc)) {
|
||||
tipc_set_sk_state(sk, TIPC_CONNECTING);
|
||||
if (timeout) {
|
||||
if (dlen && timeout) {
|
||||
timeout = msecs_to_jiffies(timeout);
|
||||
tipc_wait_for_connect(sock, &timeout);
|
||||
}
|
||||
|
@ -831,7 +831,7 @@ int main(int argc, char **argv)
|
||||
memset(cpu, 0, n_cpus * sizeof(int));
|
||||
|
||||
/* Parse commands line args */
|
||||
while ((opt = getopt_long(argc, argv, "hSd:s:p:q:c:xzFf:e:r:m:",
|
||||
while ((opt = getopt_long(argc, argv, "hSd:s:p:q:c:xzFf:e:r:m:n",
|
||||
long_options, &longindex)) != -1) {
|
||||
switch (opt) {
|
||||
case 'd':
|
||||
|
@ -13,13 +13,15 @@ root_check_run_with_sudo "$@"
|
||||
# Parameter parsing via include
|
||||
source ${basedir}/parameters.sh
|
||||
# Set some default params, if they didn't get set
|
||||
[ -z "$DEST_IP" ] && DEST_IP="198.18.0.42"
|
||||
if [ -z "$DEST_IP" ]; then
|
||||
[ -z "$IP6" ] && DEST_IP="198.18.0.42" || DEST_IP="FD00::1"
|
||||
fi
|
||||
[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
|
||||
[ -z "$CLONE_SKB" ] && CLONE_SKB="0"
|
||||
[ -z "$COUNT" ] && COUNT="0" # Zero means indefinitely
|
||||
if [ -n "$DEST_IP" ]; then
|
||||
validate_addr $DEST_IP
|
||||
read -r DST_MIN DST_MAX <<< $(parse_addr $DEST_IP)
|
||||
validate_addr${IP6} $DEST_IP
|
||||
read -r DST_MIN DST_MAX <<< $(parse_addr${IP6} $DEST_IP)
|
||||
fi
|
||||
if [ -n "$DST_PORT" ]; then
|
||||
read -r UDP_DST_MIN UDP_DST_MAX <<< $(parse_ports $DST_PORT)
|
||||
@ -62,8 +64,8 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
|
||||
|
||||
# Single destination
|
||||
pg_set $dev "dst_mac $DST_MAC"
|
||||
pg_set $dev "dst_min $DST_MIN"
|
||||
pg_set $dev "dst_max $DST_MAX"
|
||||
pg_set $dev "dst${IP6}_min $DST_MIN"
|
||||
pg_set $dev "dst${IP6}_max $DST_MAX"
|
||||
|
||||
if [ -n "$DST_PORT" ]; then
|
||||
# Single destination port or random port range
|
||||
|
@ -17,14 +17,16 @@ root_check_run_with_sudo "$@"
|
||||
# Parameter parsing via include
|
||||
source ${basedir}/parameters.sh
|
||||
# Set some default params, if they didn't get set
|
||||
[ -z "$DEST_IP" ] && DEST_IP="198.18.0.42"
|
||||
if [ -z "$DEST_IP" ]; then
|
||||
[ -z "$IP6" ] && DEST_IP="198.18.0.42" || DEST_IP="FD00::1"
|
||||
fi
|
||||
[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
|
||||
[ -z "$CLONE_SKB" ] && CLONE_SKB="0"
|
||||
[ -z "$BURST" ] && BURST=32
|
||||
[ -z "$COUNT" ] && COUNT="0" # Zero means indefinitely
|
||||
if [ -n "$DEST_IP" ]; then
|
||||
validate_addr $DEST_IP
|
||||
read -r DST_MIN DST_MAX <<< $(parse_addr $DEST_IP)
|
||||
validate_addr${IP6} $DEST_IP
|
||||
read -r DST_MIN DST_MAX <<< $(parse_addr${IP6} $DEST_IP)
|
||||
fi
|
||||
if [ -n "$DST_PORT" ]; then
|
||||
read -r UDP_DST_MIN UDP_DST_MAX <<< $(parse_ports $DST_PORT)
|
||||
@ -52,8 +54,8 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
|
||||
|
||||
# Single destination
|
||||
pg_set $dev "dst_mac $DST_MAC"
|
||||
pg_set $dev "dst_min $DST_MIN"
|
||||
pg_set $dev "dst_max $DST_MAX"
|
||||
pg_set $dev "dst${IP6}_min $DST_MIN"
|
||||
pg_set $dev "dst${IP6}_max $DST_MAX"
|
||||
|
||||
if [ -n "$DST_PORT" ]; then
|
||||
# Single destination port or random port range
|
||||
|
@ -6,7 +6,6 @@ config IMA
|
||||
select SECURITYFS
|
||||
select CRYPTO
|
||||
select CRYPTO_HMAC
|
||||
select CRYPTO_MD5
|
||||
select CRYPTO_SHA1
|
||||
select CRYPTO_HASH_INFO
|
||||
select TCG_TPM if HAS_IOMEM && !UML
|
||||
|
@ -21,7 +21,7 @@ struct key *ima_blacklist_keyring;
|
||||
/*
|
||||
* Allocate the IMA blacklist keyring
|
||||
*/
|
||||
__init int ima_mok_init(void)
|
||||
static __init int ima_mok_init(void)
|
||||
{
|
||||
struct key_restriction *restriction;
|
||||
|
||||
|
@ -1746,7 +1746,7 @@ static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
|
||||
channels = params_channels(params);
|
||||
frame_size = snd_pcm_format_size(format, channels);
|
||||
if (frame_size > 0)
|
||||
params->fifo_size /= (unsigned)frame_size;
|
||||
params->fifo_size /= frame_size;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -153,7 +153,7 @@ static int init_stream(struct snd_oxfw *oxfw, struct amdtp_stream *stream)
|
||||
struct cmp_connection *conn;
|
||||
enum cmp_direction c_dir;
|
||||
enum amdtp_stream_direction s_dir;
|
||||
unsigned int flags = CIP_UNAWARE_SYT;
|
||||
unsigned int flags = 0;
|
||||
int err;
|
||||
|
||||
if (!(oxfw->quirks & SND_OXFW_QUIRK_BLOCKING_TRANSMISSION))
|
||||
@ -161,6 +161,13 @@ static int init_stream(struct snd_oxfw *oxfw, struct amdtp_stream *stream)
|
||||
else
|
||||
flags |= CIP_BLOCKING;
|
||||
|
||||
// OXFW 970/971 has no function to generate playback timing according to the sequence
|
||||
// of value in syt field, thus the packet should include NO_INFO value in the field.
|
||||
// However, some models just ignore data blocks in packet with NO_INFO for audio data
|
||||
// processing.
|
||||
if (!(oxfw->quirks & SND_OXFW_QUIRK_IGNORE_NO_INFO_PACKET))
|
||||
flags |= CIP_UNAWARE_SYT;
|
||||
|
||||
if (stream == &oxfw->tx_stream) {
|
||||
conn = &oxfw->out_conn;
|
||||
c_dir = CMP_OUTPUT;
|
||||
|
@ -159,8 +159,10 @@ static int detect_quirks(struct snd_oxfw *oxfw, const struct ieee1394_device_id
|
||||
return snd_oxfw_scs1x_add(oxfw);
|
||||
}
|
||||
|
||||
if (entry->vendor_id == OUI_APOGEE && entry->model_id == MODEL_DUET_FW)
|
||||
oxfw->quirks |= SND_OXFW_QUIRK_BLOCKING_TRANSMISSION;
|
||||
if (entry->vendor_id == OUI_APOGEE && entry->model_id == MODEL_DUET_FW) {
|
||||
oxfw->quirks |= SND_OXFW_QUIRK_BLOCKING_TRANSMISSION |
|
||||
SND_OXFW_QUIRK_IGNORE_NO_INFO_PACKET;
|
||||
}
|
||||
|
||||
/*
|
||||
* TASCAM FireOne has physical control and requires a pair of additional
|
||||
|
@ -42,6 +42,11 @@ enum snd_oxfw_quirk {
|
||||
SND_OXFW_QUIRK_BLOCKING_TRANSMISSION = 0x04,
|
||||
// Stanton SCS1.d and SCS1.m support unique transaction.
|
||||
SND_OXFW_QUIRK_SCS_TRANSACTION = 0x08,
|
||||
// Apogee Duet FireWire ignores data blocks in packet with NO_INFO for audio data
|
||||
// processing, while output level meter moves. Any value in syt field of packet takes
|
||||
// the device to process audio data even if the value is invalid in a point of
|
||||
// IEC 61883-1/6.
|
||||
SND_OXFW_QUIRK_IGNORE_NO_INFO_PACKET = 0x10,
|
||||
};
|
||||
|
||||
/* This is an arbitrary number for convinience. */
|
||||
|
@ -3460,7 +3460,7 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
|
||||
struct hda_gen_spec *spec = codec->spec;
|
||||
const struct hda_input_mux *imux;
|
||||
struct nid_path *path;
|
||||
int i, adc_idx, err = 0;
|
||||
int i, adc_idx, ret, err = 0;
|
||||
|
||||
imux = &spec->input_mux;
|
||||
adc_idx = kcontrol->id.index;
|
||||
@ -3470,9 +3470,13 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
|
||||
if (!path || !path->ctls[type])
|
||||
continue;
|
||||
kcontrol->private_value = path->ctls[type];
|
||||
err = func(kcontrol, ucontrol);
|
||||
if (err < 0)
|
||||
ret = func(kcontrol, ucontrol);
|
||||
if (ret < 0) {
|
||||
err = ret;
|
||||
break;
|
||||
}
|
||||
if (ret > 0)
|
||||
err = 1;
|
||||
}
|
||||
mutex_unlock(&codec->control_mutex);
|
||||
if (err >= 0 && spec->cap_sync_hook)
|
||||
|
@ -883,10 +883,11 @@ static unsigned int azx_get_pos_skl(struct azx *chip, struct azx_dev *azx_dev)
|
||||
return azx_get_pos_posbuf(chip, azx_dev);
|
||||
}
|
||||
|
||||
static void azx_shutdown_chip(struct azx *chip)
|
||||
static void __azx_shutdown_chip(struct azx *chip, bool skip_link_reset)
|
||||
{
|
||||
azx_stop_chip(chip);
|
||||
azx_enter_link_reset(chip);
|
||||
if (!skip_link_reset)
|
||||
azx_enter_link_reset(chip);
|
||||
azx_clear_irq_pending(chip);
|
||||
display_power(chip, false);
|
||||
}
|
||||
@ -895,6 +896,11 @@ static void azx_shutdown_chip(struct azx *chip)
|
||||
static DEFINE_MUTEX(card_list_lock);
|
||||
static LIST_HEAD(card_list);
|
||||
|
||||
static void azx_shutdown_chip(struct azx *chip)
|
||||
{
|
||||
__azx_shutdown_chip(chip, false);
|
||||
}
|
||||
|
||||
static void azx_add_card_list(struct azx *chip)
|
||||
{
|
||||
struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
|
||||
@ -2385,7 +2391,7 @@ static void azx_shutdown(struct pci_dev *pci)
|
||||
return;
|
||||
chip = card->private_data;
|
||||
if (chip && chip->running)
|
||||
azx_shutdown_chip(chip);
|
||||
__azx_shutdown_chip(chip, true);
|
||||
}
|
||||
|
||||
/* PCI IDs */
|
||||
|
@ -6658,6 +6658,7 @@ enum {
|
||||
ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP,
|
||||
ALC623_FIXUP_LENOVO_THINKSTATION_P340,
|
||||
ALC255_FIXUP_ACER_HEADPHONE_AND_MIC,
|
||||
ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST,
|
||||
};
|
||||
|
||||
static const struct hda_fixup alc269_fixups[] = {
|
||||
@ -8242,6 +8243,12 @@ static const struct hda_fixup alc269_fixups[] = {
|
||||
.chained = true,
|
||||
.chain_id = ALC255_FIXUP_XIAOMI_HEADSET_MIC
|
||||
},
|
||||
[ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST] = {
|
||||
.type = HDA_FIXUP_FUNC,
|
||||
.v.func = alc269_fixup_limit_int_mic_boost,
|
||||
.chained = true,
|
||||
.chain_id = ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
|
||||
},
|
||||
};
|
||||
|
||||
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
|
||||
@ -8332,6 +8339,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
|
||||
SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
|
||||
SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
|
||||
SND_PCI_QUIRK(0x1028, 0x0a58, "Dell", ALC255_FIXUP_DELL_HEADSET_MIC),
|
||||
SND_PCI_QUIRK(0x1028, 0x0a61, "Dell XPS 15 9510", ALC289_FIXUP_DUAL_SPK),
|
||||
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
|
||||
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
|
||||
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
|
||||
@ -8430,6 +8438,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
|
||||
SND_PCI_QUIRK(0x103c, 0x87f2, "HP ProBook 640 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
|
||||
SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED),
|
||||
SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
|
||||
SND_PCI_QUIRK(0x103c, 0x87f6, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
|
||||
SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
|
||||
SND_PCI_QUIRK(0x103c, 0x8805, "HP ProBook 650 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
|
||||
SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
|
||||
@ -8437,8 +8446,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
|
||||
SND_PCI_QUIRK(0x103c, 0x8847, "HP EliteBook x360 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
|
||||
SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
|
||||
SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
|
||||
SND_PCI_QUIRK(0x103c, 0x8862, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
|
||||
SND_PCI_QUIRK(0x103c, 0x8863, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
|
||||
SND_PCI_QUIRK(0x103c, 0x8862, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST),
|
||||
SND_PCI_QUIRK(0x103c, 0x8863, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST),
|
||||
SND_PCI_QUIRK(0x103c, 0x886d, "HP ZBook Fury 17.3 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
|
||||
SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
|
||||
SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
|
||||
@ -9513,6 +9522,16 @@ static int patch_alc269(struct hda_codec *codec)
|
||||
|
||||
snd_hda_pick_fixup(codec, alc269_fixup_models,
|
||||
alc269_fixup_tbl, alc269_fixups);
|
||||
/* FIXME: both TX300 and ROG Strix G17 have the same SSID, and
|
||||
* the quirk breaks the latter (bko#214101).
|
||||
* Clear the wrong entry.
|
||||
*/
|
||||
if (codec->fixup_id == ALC282_FIXUP_ASUS_TX300 &&
|
||||
codec->core.vendor_id == 0x10ec0294) {
|
||||
codec_dbg(codec, "Clear wrong fixup for ASUS ROG Strix G17\n");
|
||||
codec->fixup_id = HDA_FIXUP_ID_NOT_SET;
|
||||
}
|
||||
|
||||
snd_hda_pick_pin_fixup(codec, alc269_pin_fixup_tbl, alc269_fixups, true);
|
||||
snd_hda_pick_pin_fixup(codec, alc269_fallback_pin_fixup_tbl, alc269_fixups, false);
|
||||
snd_hda_pick_fixup(codec, NULL, alc269_fixup_vendor_tbl,
|
||||
|
@ -1041,6 +1041,7 @@ static const struct hda_fixup via_fixups[] = {
|
||||
};
|
||||
|
||||
static const struct snd_pci_quirk vt2002p_fixups[] = {
|
||||
SND_PCI_QUIRK(0x1043, 0x13f7, "Asus B23E", VIA_FIXUP_POWER_SAVE),
|
||||
SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75),
|
||||
SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST),
|
||||
SND_PCI_QUIRK_VENDOR(0x1558, "Clevo", VIA_FIXUP_POWER_SAVE),
|
||||
|
@ -117,6 +117,13 @@ static struct snd_soc_dai_driver rt5682_dai[] = {
|
||||
},
|
||||
};
|
||||
|
||||
static void rt5682_i2c_disable_regulators(void *data)
|
||||
{
|
||||
struct rt5682_priv *rt5682 = data;
|
||||
|
||||
regulator_bulk_disable(ARRAY_SIZE(rt5682->supplies), rt5682->supplies);
|
||||
}
|
||||
|
||||
static int rt5682_i2c_probe(struct i2c_client *i2c,
|
||||
const struct i2c_device_id *id)
|
||||
{
|
||||
@ -157,6 +164,11 @@ static int rt5682_i2c_probe(struct i2c_client *i2c,
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = devm_add_action_or_reset(&i2c->dev, rt5682_i2c_disable_regulators,
|
||||
rt5682);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = regulator_bulk_enable(ARRAY_SIZE(rt5682->supplies),
|
||||
rt5682->supplies);
|
||||
if (ret) {
|
||||
@ -282,10 +294,7 @@ static void rt5682_i2c_shutdown(struct i2c_client *client)
|
||||
|
||||
static int rt5682_i2c_remove(struct i2c_client *client)
|
||||
{
|
||||
struct rt5682_priv *rt5682 = i2c_get_clientdata(client);
|
||||
|
||||
rt5682_i2c_shutdown(client);
|
||||
regulator_bulk_disable(ARRAY_SIZE(rt5682->supplies), rt5682->supplies);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -4076,6 +4076,16 @@ static int wcd9335_setup_irqs(struct wcd9335_codec *wcd)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void wcd9335_teardown_irqs(struct wcd9335_codec *wcd)
|
||||
{
|
||||
int i;
|
||||
|
||||
/* disable interrupts on all slave ports */
|
||||
for (i = 0; i < WCD9335_SLIM_NUM_PORT_REG; i++)
|
||||
regmap_write(wcd->if_regmap, WCD9335_SLIM_PGD_PORT_INT_EN0 + i,
|
||||
0x00);
|
||||
}
|
||||
|
||||
static void wcd9335_cdc_sido_ccl_enable(struct wcd9335_codec *wcd,
|
||||
bool ccl_flag)
|
||||
{
|
||||
@ -4844,6 +4854,7 @@ static void wcd9335_codec_init(struct snd_soc_component *component)
|
||||
static int wcd9335_codec_probe(struct snd_soc_component *component)
|
||||
{
|
||||
struct wcd9335_codec *wcd = dev_get_drvdata(component->dev);
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
snd_soc_component_init_regmap(component, wcd->regmap);
|
||||
@ -4861,7 +4872,15 @@ static int wcd9335_codec_probe(struct snd_soc_component *component)
|
||||
for (i = 0; i < NUM_CODEC_DAIS; i++)
|
||||
INIT_LIST_HEAD(&wcd->dai[i].slim_ch_list);
|
||||
|
||||
return wcd9335_setup_irqs(wcd);
|
||||
ret = wcd9335_setup_irqs(wcd);
|
||||
if (ret)
|
||||
goto free_clsh_ctrl;
|
||||
|
||||
return 0;
|
||||
|
||||
free_clsh_ctrl:
|
||||
wcd_clsh_ctrl_free(wcd->clsh_ctrl);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void wcd9335_codec_remove(struct snd_soc_component *comp)
|
||||
@ -4869,7 +4888,7 @@ static void wcd9335_codec_remove(struct snd_soc_component *comp)
|
||||
struct wcd9335_codec *wcd = dev_get_drvdata(comp->dev);
|
||||
|
||||
wcd_clsh_ctrl_free(wcd->clsh_ctrl);
|
||||
free_irq(regmap_irq_get_virq(wcd->irq_data, WCD9335_IRQ_SLIMBUS), wcd);
|
||||
wcd9335_teardown_irqs(wcd);
|
||||
}
|
||||
|
||||
static int wcd9335_codec_set_sysclk(struct snd_soc_component *comp,
|
||||
|
@ -747,6 +747,8 @@ static void wm_adsp2_init_debugfs(struct wm_adsp *dsp,
|
||||
static void wm_adsp2_cleanup_debugfs(struct wm_adsp *dsp)
|
||||
{
|
||||
wm_adsp_debugfs_clear(dsp);
|
||||
debugfs_remove_recursive(dsp->debugfs_root);
|
||||
dsp->debugfs_root = NULL;
|
||||
}
|
||||
#else
|
||||
static inline void wm_adsp2_init_debugfs(struct wm_adsp *dsp,
|
||||
|
@ -165,25 +165,25 @@ static int fsl_rpmsg_probe(struct platform_device *pdev)
|
||||
}
|
||||
|
||||
/* Get the optional clocks */
|
||||
rpmsg->ipg = devm_clk_get(&pdev->dev, "ipg");
|
||||
rpmsg->ipg = devm_clk_get_optional(&pdev->dev, "ipg");
|
||||
if (IS_ERR(rpmsg->ipg))
|
||||
rpmsg->ipg = NULL;
|
||||
return PTR_ERR(rpmsg->ipg);
|
||||
|
||||
rpmsg->mclk = devm_clk_get(&pdev->dev, "mclk");
|
||||
rpmsg->mclk = devm_clk_get_optional(&pdev->dev, "mclk");
|
||||
if (IS_ERR(rpmsg->mclk))
|
||||
rpmsg->mclk = NULL;
|
||||
return PTR_ERR(rpmsg->mclk);
|
||||
|
||||
rpmsg->dma = devm_clk_get(&pdev->dev, "dma");
|
||||
rpmsg->dma = devm_clk_get_optional(&pdev->dev, "dma");
|
||||
if (IS_ERR(rpmsg->dma))
|
||||
rpmsg->dma = NULL;
|
||||
return PTR_ERR(rpmsg->dma);
|
||||
|
||||
rpmsg->pll8k = devm_clk_get(&pdev->dev, "pll8k");
|
||||
rpmsg->pll8k = devm_clk_get_optional(&pdev->dev, "pll8k");
|
||||
if (IS_ERR(rpmsg->pll8k))
|
||||
rpmsg->pll8k = NULL;
|
||||
return PTR_ERR(rpmsg->pll8k);
|
||||
|
||||
rpmsg->pll11k = devm_clk_get(&pdev->dev, "pll11k");
|
||||
rpmsg->pll11k = devm_clk_get_optional(&pdev->dev, "pll11k");
|
||||
if (IS_ERR(rpmsg->pll11k))
|
||||
rpmsg->pll11k = NULL;
|
||||
return PTR_ERR(rpmsg->pll11k);
|
||||
|
||||
platform_set_drvdata(pdev, rpmsg);
|
||||
pm_runtime_enable(&pdev->dev);
|
||||
|
@ -127,7 +127,7 @@ static void sst_fill_alloc_params(struct snd_pcm_substream *substream,
|
||||
snd_pcm_uframes_t period_size;
|
||||
ssize_t periodbytes;
|
||||
ssize_t buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
|
||||
u32 buffer_addr = substream->runtime->dma_addr;
|
||||
u32 buffer_addr = virt_to_phys(substream->runtime->dma_area);
|
||||
|
||||
channels = substream->runtime->channels;
|
||||
period_size = substream->runtime->period_size;
|
||||
|
@ -199,7 +199,7 @@ static int kabylake_ssp0_hw_params(struct snd_pcm_substream *substream,
|
||||
}
|
||||
if (!strcmp(codec_dai->component->name, MAX98373_DEV0_NAME)) {
|
||||
ret = snd_soc_dai_set_tdm_slot(codec_dai,
|
||||
0x03, 3, 8, 24);
|
||||
0x30, 3, 8, 16);
|
||||
if (ret < 0) {
|
||||
dev_err(runtime->dev,
|
||||
"DEV0 TDM slot err:%d\n", ret);
|
||||
@ -208,10 +208,10 @@ static int kabylake_ssp0_hw_params(struct snd_pcm_substream *substream,
|
||||
}
|
||||
if (!strcmp(codec_dai->component->name, MAX98373_DEV1_NAME)) {
|
||||
ret = snd_soc_dai_set_tdm_slot(codec_dai,
|
||||
0x0C, 3, 8, 24);
|
||||
0xC0, 3, 8, 16);
|
||||
if (ret < 0) {
|
||||
dev_err(runtime->dev,
|
||||
"DEV0 TDM slot err:%d\n", ret);
|
||||
"DEV1 TDM slot err:%d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
@ -311,24 +311,6 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
|
||||
* The above 2 loops are mutually exclusive based on the stream direction,
|
||||
* thus rtd_dpcm variable will never be overwritten
|
||||
*/
|
||||
/*
|
||||
* Topology for kblda7219m98373 & kblmax98373 supports only S24_LE,
|
||||
* where as kblda7219m98927 & kblmax98927 supports S16_LE by default.
|
||||
* Skipping the port wise FE and BE configuration for kblda7219m98373 &
|
||||
* kblmax98373 as the topology (FE & BE) supports S24_LE only.
|
||||
*/
|
||||
|
||||
if (!strcmp(rtd->card->name, "kblda7219m98373") ||
|
||||
!strcmp(rtd->card->name, "kblmax98373")) {
|
||||
/* The ADSP will convert the FE rate to 48k, stereo */
|
||||
rate->min = rate->max = 48000;
|
||||
chan->min = chan->max = DUAL_CHANNEL;
|
||||
|
||||
/* set SSP to 24 bit */
|
||||
snd_mask_none(fmt);
|
||||
snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* The ADSP will convert the FE rate to 48k, stereo, 24 bit
|
||||
@ -479,31 +461,20 @@ static struct snd_pcm_hw_constraint_list constraints_channels_quad = {
|
||||
static int kbl_fe_startup(struct snd_pcm_substream *substream)
|
||||
{
|
||||
struct snd_pcm_runtime *runtime = substream->runtime;
|
||||
struct snd_soc_pcm_runtime *soc_rt = asoc_substream_to_rtd(substream);
|
||||
|
||||
/*
|
||||
* On this platform for PCM device we support,
|
||||
* 48Khz
|
||||
* stereo
|
||||
* 16 bit audio
|
||||
*/
|
||||
|
||||
runtime->hw.channels_max = DUAL_CHANNEL;
|
||||
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
|
||||
&constraints_channels);
|
||||
/*
|
||||
* Setup S24_LE (32 bit container and 24 bit valid data) for
|
||||
* kblda7219m98373 & kblmax98373. For kblda7219m98927 &
|
||||
* kblmax98927 keeping it as 16/16 due to topology FW dependency.
|
||||
*/
|
||||
if (!strcmp(soc_rt->card->name, "kblda7219m98373") ||
|
||||
!strcmp(soc_rt->card->name, "kblmax98373")) {
|
||||
runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_LE;
|
||||
snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
|
||||
|
||||
} else {
|
||||
runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
|
||||
snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
|
||||
}
|
||||
runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
|
||||
snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
|
||||
|
||||
snd_pcm_hw_constraint_list(runtime, 0,
|
||||
SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
|
||||
@ -536,23 +507,11 @@ static int kabylake_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
|
||||
static int kabylake_dmic_startup(struct snd_pcm_substream *substream)
|
||||
{
|
||||
struct snd_pcm_runtime *runtime = substream->runtime;
|
||||
struct snd_soc_pcm_runtime *soc_rt = asoc_substream_to_rtd(substream);
|
||||
|
||||
runtime->hw.channels_min = runtime->hw.channels_max = QUAD_CHANNEL;
|
||||
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
|
||||
&constraints_channels_quad);
|
||||
|
||||
/*
|
||||
* Topology for kblda7219m98373 & kblmax98373 supports only S24_LE.
|
||||
* The DMIC also configured for S24_LE. Forcing the DMIC format to
|
||||
* S24_LE due to the topology FW dependency.
|
||||
*/
|
||||
if (!strcmp(soc_rt->card->name, "kblda7219m98373") ||
|
||||
!strcmp(soc_rt->card->name, "kblmax98373")) {
|
||||
runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_LE;
|
||||
snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
|
||||
}
|
||||
|
||||
return snd_pcm_hw_constraint_list(substream->runtime, 0,
|
||||
SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
|
||||
}
|
||||
|
@ -75,7 +75,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_machines[] = {
|
||||
},
|
||||
{
|
||||
.id = "DLGS7219",
|
||||
.drv_name = "cml_da7219_max98357a",
|
||||
.drv_name = "cml_da7219_mx98357a",
|
||||
.machine_quirk = snd_soc_acpi_codec_list,
|
||||
.quirk_data = &max98390_spk_codecs,
|
||||
.sof_fw_filename = "sof-cml.ri",
|
||||
|
@ -87,7 +87,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[] = {
|
||||
},
|
||||
{
|
||||
.id = "DLGS7219",
|
||||
.drv_name = "kbl_da7219_max98357a",
|
||||
.drv_name = "kbl_da7219_mx98357a",
|
||||
.fw_filename = "intel/dsp_fw_kbl.bin",
|
||||
.machine_quirk = snd_soc_acpi_codec_list,
|
||||
.quirk_data = &kbl_7219_98357_codecs,
|
||||
|
@ -113,7 +113,7 @@ static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w,
|
||||
|
||||
static void skl_dump_mconfig(struct skl_dev *skl, struct skl_module_cfg *mcfg)
|
||||
{
|
||||
struct skl_module_iface *iface = &mcfg->module->formats[0];
|
||||
struct skl_module_iface *iface = &mcfg->module->formats[mcfg->fmt_idx];
|
||||
|
||||
dev_dbg(skl->dev, "Dumping config\n");
|
||||
dev_dbg(skl->dev, "Input Format:\n");
|
||||
@ -195,8 +195,8 @@ static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
|
||||
struct skl_module_fmt *in_fmt, *out_fmt;
|
||||
|
||||
/* Fixups will be applied to pin 0 only */
|
||||
in_fmt = &m_cfg->module->formats[0].inputs[0].fmt;
|
||||
out_fmt = &m_cfg->module->formats[0].outputs[0].fmt;
|
||||
in_fmt = &m_cfg->module->formats[m_cfg->fmt_idx].inputs[0].fmt;
|
||||
out_fmt = &m_cfg->module->formats[m_cfg->fmt_idx].outputs[0].fmt;
|
||||
|
||||
if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
|
||||
if (is_fe) {
|
||||
@ -239,9 +239,9 @@ static void skl_tplg_update_buffer_size(struct skl_dev *skl,
|
||||
/* Since fixups is applied to pin 0 only, ibs, obs needs
|
||||
* change for pin 0 only
|
||||
*/
|
||||
res = &mcfg->module->resources[0];
|
||||
in_fmt = &mcfg->module->formats[0].inputs[0].fmt;
|
||||
out_fmt = &mcfg->module->formats[0].outputs[0].fmt;
|
||||
res = &mcfg->module->resources[mcfg->res_idx];
|
||||
in_fmt = &mcfg->module->formats[mcfg->fmt_idx].inputs[0].fmt;
|
||||
out_fmt = &mcfg->module->formats[mcfg->fmt_idx].outputs[0].fmt;
|
||||
|
||||
if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
|
||||
multiplier = 5;
|
||||
@ -1463,12 +1463,6 @@ static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
|
||||
struct skl_dev *skl = get_skl_ctx(w->dapm->dev);
|
||||
|
||||
if (ac->params) {
|
||||
/*
|
||||
* Widget data is expected to be stripped of T and L
|
||||
*/
|
||||
size -= 2 * sizeof(unsigned int);
|
||||
data += 2;
|
||||
|
||||
if (size > ac->max)
|
||||
return -EINVAL;
|
||||
ac->size = size;
|
||||
@ -1637,11 +1631,12 @@ int skl_tplg_update_pipe_params(struct device *dev,
|
||||
struct skl_module_cfg *mconfig,
|
||||
struct skl_pipe_params *params)
|
||||
{
|
||||
struct skl_module_res *res = &mconfig->module->resources[0];
|
||||
struct skl_module_res *res;
|
||||
struct skl_dev *skl = get_skl_ctx(dev);
|
||||
struct skl_module_fmt *format = NULL;
|
||||
u8 cfg_idx = mconfig->pipe->cur_config_idx;
|
||||
|
||||
res = &mconfig->module->resources[mconfig->res_idx];
|
||||
skl_tplg_fill_dma_id(mconfig, params);
|
||||
mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
|
||||
mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;
|
||||
@ -1650,9 +1645,9 @@ int skl_tplg_update_pipe_params(struct device *dev,
|
||||
return 0;
|
||||
|
||||
if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
|
||||
format = &mconfig->module->formats[0].inputs[0].fmt;
|
||||
format = &mconfig->module->formats[mconfig->fmt_idx].inputs[0].fmt;
|
||||
else
|
||||
format = &mconfig->module->formats[0].outputs[0].fmt;
|
||||
format = &mconfig->module->formats[mconfig->fmt_idx].outputs[0].fmt;
|
||||
|
||||
/* set the hw_params */
|
||||
format->s_freq = params->s_freq;
|
||||
|
@ -1119,25 +1119,26 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
|
||||
afe->regmap = syscon_node_to_regmap(dev->parent->of_node);
|
||||
if (IS_ERR(afe->regmap)) {
|
||||
dev_err(dev, "could not get regmap from parent\n");
|
||||
return PTR_ERR(afe->regmap);
|
||||
ret = PTR_ERR(afe->regmap);
|
||||
goto err_pm_disable;
|
||||
}
|
||||
ret = regmap_attach_dev(dev, afe->regmap, &mt8183_afe_regmap_config);
|
||||
if (ret) {
|
||||
dev_warn(dev, "regmap_attach_dev fail, ret %d\n", ret);
|
||||
return ret;
|
||||
goto err_pm_disable;
|
||||
}
|
||||
|
||||
rstc = devm_reset_control_get(dev, "audiosys");
|
||||
if (IS_ERR(rstc)) {
|
||||
ret = PTR_ERR(rstc);
|
||||
dev_err(dev, "could not get audiosys reset:%d\n", ret);
|
||||
return ret;
|
||||
goto err_pm_disable;
|
||||
}
|
||||
|
||||
ret = reset_control_reset(rstc);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to trigger audio reset:%d\n", ret);
|
||||
return ret;
|
||||
goto err_pm_disable;
|
||||
}
|
||||
|
||||
/* enable clock for regcache get default value from hw */
|
||||
@ -1147,7 +1148,7 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
|
||||
ret = regmap_reinit_cache(afe->regmap, &mt8183_afe_regmap_config);
|
||||
if (ret) {
|
||||
dev_err(dev, "regmap_reinit_cache fail, ret %d\n", ret);
|
||||
return ret;
|
||||
goto err_pm_disable;
|
||||
}
|
||||
|
||||
pm_runtime_put_sync(&pdev->dev);
|
||||
@ -1160,8 +1161,10 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
|
||||
afe->memif_size = MT8183_MEMIF_NUM;
|
||||
afe->memif = devm_kcalloc(dev, afe->memif_size, sizeof(*afe->memif),
|
||||
GFP_KERNEL);
|
||||
if (!afe->memif)
|
||||
return -ENOMEM;
|
||||
if (!afe->memif) {
|
||||
ret = -ENOMEM;
|
||||
goto err_pm_disable;
|
||||
}
|
||||
|
||||
for (i = 0; i < afe->memif_size; i++) {
|
||||
afe->memif[i].data = &memif_data[i];
|
||||
@ -1178,22 +1181,26 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
|
||||
afe->irqs_size = MT8183_IRQ_NUM;
|
||||
afe->irqs = devm_kcalloc(dev, afe->irqs_size, sizeof(*afe->irqs),
|
||||
GFP_KERNEL);
|
||||
if (!afe->irqs)
|
||||
return -ENOMEM;
|
||||
if (!afe->irqs) {
|
||||
ret = -ENOMEM;
|
||||
goto err_pm_disable;
|
||||
}
|
||||
|
||||
for (i = 0; i < afe->irqs_size; i++)
|
||||
afe->irqs[i].irq_data = &irq_data[i];
|
||||
|
||||
/* request irq */
|
||||
irq_id = platform_get_irq(pdev, 0);
|
||||
if (irq_id < 0)
|
||||
return irq_id;
|
||||
if (irq_id < 0) {
|
||||
ret = irq_id;
|
||||
goto err_pm_disable;
|
||||
}
|
||||
|
||||
ret = devm_request_irq(dev, irq_id, mt8183_afe_irq_handler,
|
||||
IRQF_TRIGGER_NONE, "asys-isr", (void *)afe);
|
||||
if (ret) {
|
||||
dev_err(dev, "could not request_irq for asys-isr\n");
|
||||
return ret;
|
||||
goto err_pm_disable;
|
||||
}
|
||||
|
||||
/* init sub_dais */
|
||||
@ -1204,7 +1211,7 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
|
||||
if (ret) {
|
||||
dev_warn(afe->dev, "dai register i %d fail, ret %d\n",
|
||||
i, ret);
|
||||
return ret;
|
||||
goto err_pm_disable;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1213,7 +1220,7 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
|
||||
if (ret) {
|
||||
dev_warn(afe->dev, "mtk_afe_combine_sub_dai fail, ret %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
goto err_pm_disable;
|
||||
}
|
||||
|
||||
afe->mtk_afe_hardware = &mt8183_afe_hardware;
|
||||
@ -1229,7 +1236,7 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
|
||||
NULL, 0);
|
||||
if (ret) {
|
||||
dev_warn(dev, "err_platform\n");
|
||||
return ret;
|
||||
goto err_pm_disable;
|
||||
}
|
||||
|
||||
ret = devm_snd_soc_register_component(afe->dev,
|
||||
@ -1238,10 +1245,14 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
|
||||
afe->num_dai_drivers);
|
||||
if (ret) {
|
||||
dev_warn(dev, "err_dai_component\n");
|
||||
return ret;
|
||||
goto err_pm_disable;
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
||||
err_pm_disable:
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int mt8183_afe_pcm_dev_remove(struct platform_device *pdev)
|
||||
|
@ -2229,12 +2229,13 @@ static int mt8192_afe_pcm_dev_probe(struct platform_device *pdev)
|
||||
afe->regmap = syscon_node_to_regmap(dev->parent->of_node);
|
||||
if (IS_ERR(afe->regmap)) {
|
||||
dev_err(dev, "could not get regmap from parent\n");
|
||||
return PTR_ERR(afe->regmap);
|
||||
ret = PTR_ERR(afe->regmap);
|
||||
goto err_pm_disable;
|
||||
}
|
||||
ret = regmap_attach_dev(dev, afe->regmap, &mt8192_afe_regmap_config);
|
||||
if (ret) {
|
||||
dev_warn(dev, "regmap_attach_dev fail, ret %d\n", ret);
|
||||
return ret;
|
||||
goto err_pm_disable;
|
||||
}
|
||||
|
||||
/* enable clock for regcache get default value from hw */
|
||||
@ -2244,7 +2245,7 @@ static int mt8192_afe_pcm_dev_probe(struct platform_device *pdev)
|
||||
ret = regmap_reinit_cache(afe->regmap, &mt8192_afe_regmap_config);
|
||||
if (ret) {
|
||||
dev_err(dev, "regmap_reinit_cache fail, ret %d\n", ret);
|
||||
return ret;
|
||||
goto err_pm_disable;
|
||||
}
|
||||
|
||||
pm_runtime_put_sync(&pdev->dev);
|
||||
@ -2257,8 +2258,10 @@ static int mt8192_afe_pcm_dev_probe(struct platform_device *pdev)
|
||||
afe->memif_size = MT8192_MEMIF_NUM;
|
||||
afe->memif = devm_kcalloc(dev, afe->memif_size, sizeof(*afe->memif),
|
||||
GFP_KERNEL);
|
||||
if (!afe->memif)
|
||||
return -ENOMEM;
|
||||
if (!afe->memif) {
|
||||
ret = -ENOMEM;
|
||||
goto err_pm_disable;
|
||||
}
|
||||
|
||||
for (i = 0; i < afe->memif_size; i++) {
|
||||
afe->memif[i].data = &memif_data[i];
|
||||
@ -2272,22 +2275,26 @@ static int mt8192_afe_pcm_dev_probe(struct platform_device *pdev)
|
||||
afe->irqs_size = MT8192_IRQ_NUM;
|
||||
afe->irqs = devm_kcalloc(dev, afe->irqs_size, sizeof(*afe->irqs),
|
||||
GFP_KERNEL);
|
||||
if (!afe->irqs)
|
||||
return -ENOMEM;
|
||||
if (!afe->irqs) {
|
||||
ret = -ENOMEM;
|
||||
goto err_pm_disable;
|
||||
}
|
||||
|
||||
for (i = 0; i < afe->irqs_size; i++)
|
||||
afe->irqs[i].irq_data = &irq_data[i];
|
||||
|
||||
/* request irq */
|
||||
irq_id = platform_get_irq(pdev, 0);
|
||||
if (irq_id < 0)
|
||||
return irq_id;
|
||||
if (irq_id < 0) {
|
||||
ret = irq_id;
|
||||
goto err_pm_disable;
|
||||
}
|
||||
|
||||
ret = devm_request_irq(dev, irq_id, mt8192_afe_irq_handler,
|
||||
IRQF_TRIGGER_NONE, "asys-isr", (void *)afe);
|
||||
if (ret) {
|
||||
dev_err(dev, "could not request_irq for Afe_ISR_Handle\n");
|
||||
return ret;
|
||||
goto err_pm_disable;
|
||||
}
|
||||
|
||||
/* init sub_dais */
|
||||
|
@ -68,6 +68,7 @@ static int pid[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS-1)] = -1 };
|
||||
static int device_setup[SNDRV_CARDS]; /* device parameter for this card */
|
||||
static bool ignore_ctl_error;
|
||||
static bool autoclock = true;
|
||||
static bool lowlatency = true;
|
||||
static char *quirk_alias[SNDRV_CARDS];
|
||||
static char *delayed_register[SNDRV_CARDS];
|
||||
static bool implicit_fb[SNDRV_CARDS];
|
||||
@ -92,6 +93,8 @@ MODULE_PARM_DESC(ignore_ctl_error,
|
||||
"Ignore errors from USB controller for mixer interfaces.");
|
||||
module_param(autoclock, bool, 0444);
|
||||
MODULE_PARM_DESC(autoclock, "Enable auto-clock selection for UAC2 devices (default: yes).");
|
||||
module_param(lowlatency, bool, 0444);
|
||||
MODULE_PARM_DESC(lowlatency, "Enable low latency playback (default: yes).");
|
||||
module_param_array(quirk_alias, charp, NULL, 0444);
|
||||
MODULE_PARM_DESC(quirk_alias, "Quirk aliases, e.g. 0123abcd:5678beef.");
|
||||
module_param_array(delayed_register, charp, NULL, 0444);
|
||||
@ -599,6 +602,7 @@ static int snd_usb_audio_create(struct usb_interface *intf,
|
||||
chip->setup = device_setup[idx];
|
||||
chip->generic_implicit_fb = implicit_fb[idx];
|
||||
chip->autoclock = autoclock;
|
||||
chip->lowlatency = lowlatency;
|
||||
atomic_set(&chip->active, 1); /* avoid autopm during probing */
|
||||
atomic_set(&chip->usage_count, 0);
|
||||
atomic_set(&chip->shutdown, 0);
|
||||
|
@ -94,6 +94,7 @@ struct snd_usb_endpoint {
|
||||
struct list_head ready_playback_urbs; /* playback URB FIFO for implicit fb */
|
||||
|
||||
unsigned int nurbs; /* # urbs */
|
||||
unsigned int nominal_queue_size; /* total buffer sizes in URBs */
|
||||
unsigned long active_mask; /* bitmask of active urbs */
|
||||
unsigned long unlink_mask; /* bitmask of unlinked urbs */
|
||||
char *syncbuf; /* sync buffer for all sync URBs */
|
||||
@ -187,6 +188,7 @@ struct snd_usb_substream {
|
||||
} dsd_dop;
|
||||
|
||||
bool trigger_tstamp_pending_update; /* trigger timestamp being updated from initial estimate */
|
||||
bool early_playback_start; /* early start needed for playback? */
|
||||
struct media_ctl *media_ctl;
|
||||
};
|
||||
|
||||
|
@ -1126,6 +1126,10 @@ static int data_ep_set_params(struct snd_usb_endpoint *ep)
|
||||
INIT_LIST_HEAD(&u->ready_list);
|
||||
}
|
||||
|
||||
/* total buffer bytes of all URBs plus the next queue;
|
||||
* referred in pcm.c
|
||||
*/
|
||||
ep->nominal_queue_size = maxsize * urb_packs * (ep->nurbs + 1);
|
||||
return 0;
|
||||
|
||||
out_of_memory:
|
||||
@ -1287,6 +1291,11 @@ int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
|
||||
* to be set up before parameter setups
|
||||
*/
|
||||
iface_first = ep->cur_audiofmt->protocol == UAC_VERSION_1;
|
||||
/* Workaround for Sony WALKMAN NW-A45 DAC;
|
||||
* it requires the interface setup at first like UAC1
|
||||
*/
|
||||
if (chip->usb_id == USB_ID(0x054c, 0x0b8c))
|
||||
iface_first = true;
|
||||
if (iface_first) {
|
||||
err = endpoint_set_interface(chip, ep, true);
|
||||
if (err < 0)
|
||||
|
@ -614,6 +614,15 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
|
||||
subs->period_elapsed_pending = 0;
|
||||
runtime->delay = 0;
|
||||
|
||||
/* check whether early start is needed for playback stream */
|
||||
subs->early_playback_start =
|
||||
subs->direction == SNDRV_PCM_STREAM_PLAYBACK &&
|
||||
(!chip->lowlatency ||
|
||||
(subs->data_endpoint->nominal_queue_size >= subs->buffer_bytes));
|
||||
|
||||
if (subs->early_playback_start)
|
||||
ret = start_endpoints(subs);
|
||||
|
||||
unlock:
|
||||
snd_usb_unlock_shutdown(chip);
|
||||
return ret;
|
||||
@ -1394,7 +1403,7 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
|
||||
subs->trigger_tstamp_pending_update = false;
|
||||
}
|
||||
|
||||
if (period_elapsed && !subs->running) {
|
||||
if (period_elapsed && !subs->running && !subs->early_playback_start) {
|
||||
subs->period_elapsed_pending = 1;
|
||||
period_elapsed = 0;
|
||||
}
|
||||
@ -1448,7 +1457,8 @@ static int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substrea
|
||||
prepare_playback_urb,
|
||||
retire_playback_urb,
|
||||
subs);
|
||||
if (cmd == SNDRV_PCM_TRIGGER_START) {
|
||||
if (!subs->early_playback_start &&
|
||||
cmd == SNDRV_PCM_TRIGGER_START) {
|
||||
err = start_endpoints(subs);
|
||||
if (err < 0) {
|
||||
snd_usb_endpoint_set_callback(subs->data_endpoint,
|
||||
|
@ -1904,6 +1904,7 @@ static const struct registration_quirk registration_quirks[] = {
|
||||
REG_QUIRK_ENTRY(0x0951, 0x16ed, 2), /* Kingston HyperX Cloud Alpha S */
|
||||
REG_QUIRK_ENTRY(0x0951, 0x16ea, 2), /* Kingston HyperX Cloud Flight S */
|
||||
REG_QUIRK_ENTRY(0x0ecb, 0x1f46, 2), /* JBL Quantum 600 */
|
||||
REG_QUIRK_ENTRY(0x0ecb, 0x1f47, 2), /* JBL Quantum 800 */
|
||||
REG_QUIRK_ENTRY(0x0ecb, 0x2039, 2), /* JBL Quantum 400 */
|
||||
REG_QUIRK_ENTRY(0x0ecb, 0x203c, 2), /* JBL Quantum 600 */
|
||||
REG_QUIRK_ENTRY(0x0ecb, 0x203e, 2), /* JBL Quantum 800 */
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user