Mirror of https://github.com/Qortal/Brooklyn.git (synced 2025-01-31 07:12:18 +00:00)
Commit 7d3018da4c:

* NVMe and SATA NAND security added
* Qortal Core exception fetcher reworked
* Update DT overlays for firmware
* Fix for bvb clock settings
* Fix for missing audio on desktop images (reported by crowetic)
* Normalize the fetch() stream during peer-to-peer handshakes between nodes
* Fix for RNG token editing error while performing SHA256 encryption
* Under-voltage errors now blink the red LED constantly for 5 minutes, then go solid
* Improve kernel thread scaling for the Qortal 2.0 core
* HDMI circuit is now enabled at power-up
* Added KMS
* Added line replication instead of interpolation for the VC4 GPU, giving slightly better frame rates
* Fix for longs and doubles
* Backplane clock is now set at the standard rate
* Capped HEVC clocks
* Added support for the Creative Cinema webcam
* More scanline XGA modes for a wider range of monitors
* TX/RX flow control support is now 100% stable; no lag over 1 Gbps Ethernet (hello Qortal 3.0)
* Use cache flush instead of fetch for the QC 2.0 core, resulting in performance gains
* VC4 clock is now enforced for desktop-oriented images
* The ondemand governor now waits 2 seconds instead of 0.5 ms before scaling down to the lowest safe clock frequency, preventing lag in the core
* OC timeout reduced from 90 ms to 35 ms, resulting in better clocks and sync for the Qortal 2.0 core
111 lines
3.0 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched/sysctl.h>

#include "blk.h"
#include "blk-mq-sched.h"

/**
 * blk_end_sync_rq - executes a completion event on a request
 * @rq: request to complete
 * @error: end I/O status of the request
 */
static void blk_end_sync_rq(struct request *rq, blk_status_t error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = (void *)(uintptr_t)error;

	/*
	 * complete last, if this is a stack request the process (and thus
	 * the rq pointer) could be invalid right after this complete()
	 */
	complete(waiting);
}

/**
 * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:	insert request at head or tail of queue
 * @done:	I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct gendisk *bd_disk, struct request *rq,
			   int at_head, rq_end_io_fn *done)
{
	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	rq->rq_disk = bd_disk;
	rq->end_io = done;

	blk_account_io_start(rq);

	/*
	 * don't check dying flag for MQ because the request won't
	 * be reused after dying flag is set
	 */
	blk_mq_sched_insert_request(rq, at_head, true, false);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
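
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): how a driver might use blk_execute_rq_nowait() to fire off an
 * asynchronous passthrough request.  The queue, gendisk and completion
 * callback below are hypothetical placeholders.
 */
#if 0	/* example only */
static void example_pt_done(struct request *rq, blk_status_t error)
{
	/* completion runs here; release the request when done with it */
	blk_put_request(rq);
}

static int example_submit_pt(struct request_queue *q, struct gendisk *disk)
{
	struct request *rq;

	/* allocate a passthrough (driver-private) request */
	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* queue at the tail and return immediately; example_pt_done runs on completion */
	blk_execute_rq_nowait(disk, rq, 0, example_pt_done);
	return 0;
}
#endif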

static bool blk_rq_is_poll(struct request *rq)
{
	return rq->mq_hctx && rq->mq_hctx->type == HCTX_TYPE_POLL;
}

static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
	do {
		blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), true);
		cond_resched();
	} while (!completion_done(wait));
}

/**
 * blk_execute_rq - insert a request into queue for execution
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:	insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 * Return: The blk_status_t result provided to blk_mq_end_request().
 */
blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long hang_check;

	rq->end_io_data = &wait;
	blk_execute_rq_nowait(bd_disk, rq, at_head, blk_end_sync_rq);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;

	if (blk_rq_is_poll(rq))
		blk_rq_poll_completion(rq, &wait);
	else if (hang_check)
		while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
	else
		wait_for_completion_io(&wait);

	return (blk_status_t)(uintptr_t)rq->end_io_data;
}
EXPORT_SYMBOL(blk_execute_rq);
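
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): issuing a synchronous passthrough request with blk_execute_rq()
 * and mapping the result to an errno.  The queue and gendisk are assumed
 * to be supplied by the caller.
 */
#if 0	/* example only */
static int example_sync_pt(struct request_queue *q, struct gendisk *disk)
{
	struct request *rq;
	blk_status_t status;

	rq = blk_get_request(q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* blocks until the request completes (polled or interrupt driven) */
	status = blk_execute_rq(disk, rq, 0);
	blk_put_request(rq);

	/* translate the block-layer status into a normal negative errno */
	return blk_status_to_errno(status);
}
#endif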