T3Q birth certificate is an apology from a condom factory.

Raziel K. Crowe 2022-03-22 19:04:37 +05:00
parent bf8dec2489
commit 604a706f77
7 changed files with 83 additions and 40 deletions

View File

@@ -578,9 +578,14 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
for (i = 0; i < nr_slots(alloc_size + offset); i++)
mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
tlb_addr = slot_addr(mem->start, index) + offset;
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
/*
* When dir == DMA_FROM_DEVICE we could omit the copy from the orig
* to the tlb buffer, if we knew for sure the device will
* overwrite the entire current content. But we don't. Thus
* unconditional bounce may prevent leaking swiotlb content (i.e.
* kernel memory) to user-space.
*/
swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
return tlb_addr;
}
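The new comment explains the change: for DMA_FROM_DEVICE the device may overwrite only part of the mapping, so skipping the initial copy into the bounce buffer can leak whatever the swiotlb slot held before. Below is a minimal userspace model of that leak (plain C, hypothetical names such as bounce_slot and device_dma_write; it is not kernel code and only illustrates why the bounce is now unconditional).

/* Userspace model of the swiotlb info leak fixed above (hypothetical names). */
#include <stdio.h>
#include <string.h>

#define MAPPING_SIZE 16

static char bounce_slot[MAPPING_SIZE];	/* stands in for the swiotlb slot */

/* The "device" only fills the first half of the mapping. */
static void device_dma_write(char *buf)
{
	memcpy(buf, "AAAAAAAA", 8);
}

int main(void)
{
	char orig[MAPPING_SIZE] = { 0 };	/* caller's buffer */
	int bounce_unconditionally = 1;		/* behaviour after this commit */

	/* Pretend the slot still holds data from an earlier, unrelated mapping. */
	memcpy(bounce_slot, "kernel-secret!!", MAPPING_SIZE);

	/* map: with the fix, the original buffer is copied in even for FROM_DEVICE */
	if (bounce_unconditionally)
		memcpy(bounce_slot, orig, MAPPING_SIZE);

	device_dma_write(bounce_slot);			/* partial device write */
	memcpy(orig, bounce_slot, MAPPING_SIZE);	/* unmap/sync_for_cpu copy back */

	/* Without the unconditional bounce, bytes 8..15 would still read "secret!!". */
	printf("tail: %.8s\n", &orig[8]);
	return 0;
}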
@@ -647,10 +652,13 @@ void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir)
{
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
else
BUG_ON(dir != DMA_FROM_DEVICE);
/*
* Unconditional bounce is necessary to avoid corruption on
* sync_*_for_cpu or dma_unmap_* when the device didn't overwrite
* the whole length of the bounce buffer.
*/
swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
BUG_ON(!valid_dma_direction(dir));
}
void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,

View File

@@ -1496,10 +1496,12 @@ static int __init set_buf_size(char *str)
if (!str)
return 0;
buf_size = memparse(str, &str);
/* nr_entries can not be zero */
if (buf_size == 0)
return 0;
trace_buf_size = buf_size;
/*
* nr_entries can not be zero and the startup
* tests require some buffer space. Therefore
* ensure we have at least 4096 bytes of buffer.
*/
trace_buf_size = max(4096UL, buf_size);
return 1;
}
__setup("trace_buf_size=", set_buf_size);

View File

@@ -1195,6 +1195,26 @@ static int run_osnoise(void)
osnoise_stop_tracing();
}
/*
* In some cases, notably when running on a nohz_full CPU with
* a stopped tick, PREEMPT_RCU has no way to account for QSs.
* This will eventually cause unwarranted noise as PREEMPT_RCU
* will force preemption as the means of ending the current
* grace period. We avoid this problem by calling
* rcu_momentary_dyntick_idle(), which performs a zero duration
* EQS allowing PREEMPT_RCU to end the current grace period.
* This call shouldn't be wrapped inside an RCU critical
* section.
*
* Note that in non-PREEMPT_RCU kernels QSs are handled through
* cond_resched().
*/
if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
local_irq_disable();
rcu_momentary_dyntick_idle();
local_irq_enable();
}
/*
* For the non-preemptive kernel config: let threads run, if
* they so wish.
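The added block reports a quiescent state by hand because a busy sampling loop on a nohz_full CPU never gives PREEMPT_RCU one otherwise. The sketch below is a loose userspace analogy only, assuming a hand-rolled qs_count counter rather than any real RCU API: the waiter can only make progress because the busy worker explicitly announces quiescent states.

/* Loose pthreads model of a quiescent-state based grace period (not RCU API). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_ulong qs_count;	/* advanced whenever the worker is "quiescent" */
static atomic_int stop;

/* Worker: a long busy loop that would stall waiters unless it reports QSs. */
static void *worker(void *arg)
{
	while (!atomic_load(&stop)) {
		/* ... busy measurement loop, like run_osnoise() ... */
		atomic_fetch_add(&qs_count, 1);	/* explicit quiescent state report */
	}
	return NULL;
}

/* Waiter: a "grace period" simply waits for the counter to move. */
static void wait_for_qs(void)
{
	unsigned long snap = atomic_load(&qs_count);

	while (atomic_load(&qs_count) == snap)
		usleep(1000);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	wait_for_qs();		/* completes only because the worker reports QSs */
	atomic_store(&stop, 1);
	pthread_join(t, NULL);
	puts("grace period completed");
	return 0;
}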
@@ -1249,6 +1269,37 @@ static int run_osnoise(void)
static struct cpumask osnoise_cpumask;
static struct cpumask save_cpumask;
/*
* osnoise_sleep - sleep until the next period
*/
static void osnoise_sleep(void)
{
u64 interval;
ktime_t wake_time;
mutex_lock(&interface_lock);
interval = osnoise_data.sample_period - osnoise_data.sample_runtime;
mutex_unlock(&interface_lock);
/*
* differently from hwlat_detector, the osnoise tracer can run
* without a pause because preemption is on.
*/
if (!interval) {
/* Let synchronize_rcu_tasks() make progress */
cond_resched_tasks_rcu_qs();
return;
}
wake_time = ktime_add_us(ktime_get(), interval);
__set_current_state(TASK_INTERRUPTIBLE);
while (schedule_hrtimeout_range(&wake_time, 0, HRTIMER_MODE_ABS)) {
if (kthread_should_stop())
break;
}
}
/*
* osnoise_main - The osnoise detection kernel thread
*
@@ -1257,30 +1308,10 @@ static struct cpumask save_cpumask;
*/
static int osnoise_main(void *data)
{
u64 interval;
while (!kthread_should_stop()) {
run_osnoise();
mutex_lock(&interface_lock);
interval = osnoise_data.sample_period - osnoise_data.sample_runtime;
mutex_unlock(&interface_lock);
do_div(interval, USEC_PER_MSEC);
/*
* differently from hwlat_detector, the osnoise tracer can run
* without a pause because preemption is on.
*/
if (interval < 1) {
/* Let synchronize_rcu_tasks() make progress */
cond_resched_tasks_rcu_qs();
continue;
}
if (msleep_interruptible(interval))
break;
osnoise_sleep();
}
return 0;
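The new osnoise_sleep() above computes an absolute wake time and sleeps with HRTIMER_MODE_ABS, so work done before the sleep does not stretch the period the way the old relative msleep_interruptible() could. A userspace sketch of the same absolute-deadline idea, using clock_nanosleep() with TIMER_ABSTIME instead of the kernel's schedule_hrtimeout_range(), follows; it is illustrative only.

/* Sleep until an absolute deadline, mirroring the HRTIMER_MODE_ABS pattern. */
#include <stdint.h>
#include <time.h>

static void sleep_until_next_period(uint64_t interval_us)
{
	struct timespec wake;

	clock_gettime(CLOCK_MONOTONIC, &wake);

	/* wake_time = now + interval, computed before any further work */
	wake.tv_nsec += (interval_us % 1000000) * 1000;
	wake.tv_sec  += interval_us / 1000000 + wake.tv_nsec / 1000000000;
	wake.tv_nsec %= 1000000000;

	/* TIMER_ABSTIME: retrying after a signal still targets the same deadline */
	while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &wake, NULL))
		;
}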

View File

@@ -54,6 +54,7 @@ static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe,
bit += page->index;
set_bit(bit, wqueue->notes_bitmap);
generic_pipe_buf_release(pipe, buf);
}
// No try_steal function => no stealing
@@ -112,7 +113,7 @@ static bool post_one_notification(struct watch_queue *wqueue,
buf->offset = offset;
buf->len = len;
buf->flags = PIPE_BUF_FLAG_WHOLE;
pipe->head = head + 1;
smp_store_release(&pipe->head, head + 1); /* vs pipe_read() */
if (!test_and_clear_bit(note, wqueue->notes_bitmap)) {
spin_unlock_irq(&pipe->rd_wait.lock);
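Switching the head update to smp_store_release() pairs it with the corresponding acquire read in pipe_read(): the reader may only observe the new head after the slot fields written just above it are visible. A simplified C11 model of that publish pattern (single producer and single consumer assumed; the ring size of 16 is arbitrary) is sketched below.

/* C11 model of the release/acquire publish pattern used for pipe->head. */
#include <stdatomic.h>

struct slot { int offset, len; };

static struct slot ring[16];
static atomic_uint head;

/* Producer: fill the slot, then publish the new head with a release store. */
void post_one(unsigned int h, int offset, int len)
{
	ring[h % 16].offset = offset;
	ring[h % 16].len = len;
	atomic_store_explicit(&head, h + 1, memory_order_release);
}

/* Consumer: an acquire load of head makes the slot contents visible. */
int read_one(unsigned int tail, struct slot *out)
{
	if (atomic_load_explicit(&head, memory_order_acquire) == tail)
		return 0;		/* nothing published yet */
	*out = ring[tail % 16];
	return 1;
}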
@@ -243,7 +244,8 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
goto error;
}
ret = pipe_resize_ring(pipe, nr_notes);
nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE;
ret = pipe_resize_ring(pipe, roundup_pow_of_two(nr_notes));
if (ret < 0)
goto error;
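The pipe ring can only be resized to a power-of-two number of slots, so the requested note count is rounded up for pipe_resize_ring() while the recomputed nr_notes keeps the page-derived value. For reference, a round-up helper equivalent in spirit to the kernel's roundup_pow_of_two() (renamed here so it is not mistaken for the kernel implementation) can be as small as:

/* Round n up to the next power of two (illustrative, assumes n > 0). */
static unsigned long roundup_pow_of_two_ul(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}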
@@ -268,7 +270,7 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
wqueue->notes = pages;
wqueue->notes_bitmap = bitmap;
wqueue->nr_pages = nr_pages;
wqueue->nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE;
wqueue->nr_notes = nr_notes;
return 0;
error_p:
@@ -320,7 +322,7 @@ long watch_queue_set_filter(struct pipe_inode_info *pipe,
tf[i].info_mask & WATCH_INFO_LENGTH)
goto err_filter;
/* Ignore any unknown types */
if (tf[i].type >= sizeof(wfilter->type_filter) * 8)
if (tf[i].type >= WATCH_TYPE__NR)
continue;
nr_filter++;
}
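The old bound, sizeof(wfilter->type_filter) * 8, counts every bit in a bitmap that is rounded up to whole longs, so type numbers beyond the last defined watch type were accepted; WATCH_TYPE__NR bounds the check by what is actually defined. The standalone snippet below (hypothetical enum and bitmap, not the kernel structures) shows how far apart the two bounds can be.

/* Illustration: a bitmap sized in longs admits more bits than defined types. */
#include <stdio.h>

enum fake_watch_type {
	FAKE_TYPE_META,
	FAKE_TYPE_KEY_NOTIFY,
	FAKE_TYPE__NR		/* 2 defined types, like WATCH_TYPE__NR */
};

int main(void)
{
	unsigned long type_filter[1];	/* rounded up to a whole long */

	unsigned int bitmap_bound = sizeof(type_filter) * 8;	/* 64 on LP64 */
	unsigned int enum_bound = FAKE_TYPE__NR;		/* 2 */

	/* A type value of, say, 40 passes the old check but is undefined. */
	printf("bitmap bound %u vs enum bound %u\n", bitmap_bound, enum_bound);
	return 0;
}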
@@ -336,7 +338,7 @@ long watch_queue_set_filter(struct pipe_inode_info *pipe,
q = wfilter->filters;
for (i = 0; i < filter.nr_filters; i++) {
if (tf[i].type >= sizeof(wfilter->type_filter) * BITS_PER_LONG)
if (tf[i].type >= WATCH_TYPE__NR)
continue;
q->type = tf[i].type;
@@ -371,6 +373,7 @@ static void __put_watch_queue(struct kref *kref)
for (i = 0; i < wqueue->nr_pages; i++)
__free_page(wqueue->notes[i]);
bitmap_free(wqueue->notes_bitmap);
wfilter = rcu_access_pointer(wqueue->filter);
if (wfilter)
@@ -566,7 +569,7 @@ void watch_queue_clear(struct watch_queue *wqueue)
rcu_read_lock();
spin_lock_bh(&wqueue->lock);
/* Prevent new additions and prevent notifications from happening */
/* Prevent new notifications from being stored. */
wqueue->defunct = true;
while (!hlist_empty(&wqueue->watches)) {

View File

@@ -45,7 +45,6 @@ config BITREVERSE
config HAVE_ARCH_BITREVERSE
bool
default n
depends on BITREVERSE
help
This option enables the use of hardware bit-reversal instructions on
architectures which support such operations.

View File

@@ -349,7 +349,6 @@ void dsa_flush_workqueue(void)
{
flush_workqueue(dsa_owq);
}
EXPORT_SYMBOL_GPL(dsa_flush_workqueue);
int dsa_devlink_param_get(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)

View File

@@ -170,6 +170,7 @@ void dsa_tag_driver_put(const struct dsa_device_ops *ops);
const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);
bool dsa_schedule_work(struct work_struct *work);
void dsa_flush_workqueue(void);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)