Mirror of https://github.com/Qortal/Brooklyn.git, synced 2025-02-01 07:42:18 +00:00
If we throw a stick, will Mike fetch it?
This commit is contained in:
parent 3ee9a3d6da, commit 91bdb3f820
@@ -134,6 +134,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* AYA NEO 2021 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYADEVICE"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYA NEO 2021"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* GPD MicroPC (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
@@ -185,6 +191,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
},
.driver_data = (void *)&gpd_win2,
}, { /* GPD Win 3 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1618-03")
},
.driver_data = (void *)&lcd720x1280_rightside_up,
}, { /* I.T.Works TW891 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
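For context, tables like orientation_data[] above are normally consulted through the kernel's DMI helpers at probe time. Below is a minimal, hypothetical sketch of that lookup pattern; example_quirks, panel_quirk and example_lookup() are illustrative names, not symbols from this driver.

#include <linux/dmi.h>

struct panel_quirk {
	int rotation;	/* e.g. a DRM panel-orientation value */
};

static const struct panel_quirk example_quirk = { .rotation = 1 };

/* Hypothetical table in the same shape as orientation_data[] above. */
static const struct dmi_system_id example_quirks[] = {
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYADEVICE"),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYA NEO 2021"),
		},
		.driver_data = (void *)&example_quirk,
	},
	{}	/* terminating entry */
};

static const struct panel_quirk *example_lookup(void)
{
	const struct dmi_system_id *match = dmi_first_match(example_quirks);

	/* driver_data of the first matching entry carries the quirk. */
	return match ? match->driver_data : NULL;
}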
@@ -1916,6 +1916,9 @@ void intel_dp_sync_state(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

if (!crtc_state)
return;

/*
* Don't clobber DPCD if it's been already read out during output
* setup (eDP) or detect.
@@ -11048,12 +11048,6 @@ enum skl_power_gate {
#define DC_STATE_DEBUG_MASK_CORES (1 << 0)
#define DC_STATE_DEBUG_MASK_MEMORY_UP (1 << 1)

#define BXT_P_CR_MC_BIOS_REQ_0_0_0 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x7114)
#define BXT_REQ_DATA_MASK 0x3F
#define BXT_DRAM_CHANNEL_ACTIVE_SHIFT 12
#define BXT_DRAM_CHANNEL_ACTIVE_MASK (0xF << 12)
#define BXT_MEMORY_FREQ_MULTIPLIER_HZ 133333333

#define BXT_D_CR_DRP0_DUNIT8 0x1000
#define BXT_D_CR_DRP0_DUNIT9 0x1200
#define BXT_D_CR_DRP0_DUNIT_START 8
@@ -11084,9 +11078,7 @@ enum skl_power_gate {
#define BXT_DRAM_TYPE_LPDDR4 (0x2 << 22)
#define BXT_DRAM_TYPE_DDR4 (0x4 << 22)

#define SKL_MEMORY_FREQ_MULTIPLIER_HZ 266666666
#define SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5E04)
#define SKL_REQ_DATA_MASK (0xF << 0)
#define DG1_GEAR_TYPE REG_BIT(16)

#define SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5000)
@@ -794,7 +794,6 @@ DECLARE_EVENT_CLASS(i915_request,
TP_STRUCT__entry(
__field(u32, dev)
__field(u64, ctx)
__field(u32, guc_id)
__field(u16, class)
__field(u16, instance)
__field(u32, seqno)
@@ -805,16 +804,14 @@ DECLARE_EVENT_CLASS(i915_request,
__entry->dev = rq->engine->i915->drm.primary->index;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->guc_id = rq->context->guc_id;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->tail = rq->tail;
),

TP_printk("dev=%u, engine=%u:%u, guc_id=%u, ctx=%llu, seqno=%u, tail=%u",
TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, tail=%u",
__entry->dev, __entry->class, __entry->instance,
__entry->guc_id, __entry->ctx, __entry->seqno,
__entry->tail)
__entry->ctx, __entry->seqno, __entry->tail)
);

DEFINE_EVENT(i915_request, i915_request_add,
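Note on the hunk above: a tracepoint field such as guc_id has to be removed from all three coupled pieces of the event class (the TP_STRUCT__entry declaration, the assignment block, and the TP_printk format) or the trace event no longer builds. A stripped-down, hypothetical event class showing how the three pieces line up; in real use this lives in a trace header pulled in by the tracepoint machinery, and struct my_request is an assumed stand-in type.

#include <linux/tracepoint.h>

struct my_request {
	u32 seqno;
	u64 ctx;
};

DECLARE_EVENT_CLASS(my_request_class,
	TP_PROTO(struct my_request *rq),
	TP_ARGS(rq),

	TP_STRUCT__entry(
		__field(u64, ctx)		/* 1: declare the field */
		__field(u32, seqno)
	),

	TP_fast_assign(
		__entry->ctx = rq->ctx;		/* 2: fill it in */
		__entry->seqno = rq->seqno;
	),

	TP_printk("ctx=%llu, seqno=%u",		/* 3: print it */
		  __entry->ctx, __entry->seqno)
);

DEFINE_EVENT(my_request_class, my_request_add,
	TP_PROTO(struct my_request *rq),
	TP_ARGS(rq)
);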
@@ -244,7 +244,6 @@ static int
skl_get_dram_info(struct drm_i915_private *i915)
{
struct dram_info *dram_info = &i915->dram_info;
u32 mem_freq_khz, val;
int ret;

dram_info->type = skl_get_dram_type(i915);
@@ -255,17 +254,6 @@ skl_get_dram_info(struct drm_i915_private *i915)
if (ret)
return ret;

val = intel_uncore_read(&i915->uncore,
SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

if (dram_info->num_channels * mem_freq_khz == 0) {
drm_info(&i915->drm,
"Couldn't get system memory bandwidth\n");
return -EINVAL;
}

return 0;
}

@@ -350,24 +338,10 @@ static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val)
static int bxt_get_dram_info(struct drm_i915_private *i915)
{
struct dram_info *dram_info = &i915->dram_info;
u32 dram_channels;
u32 mem_freq_khz, val;
u8 num_active_channels, valid_ranks = 0;
u32 val;
u8 valid_ranks = 0;
int i;

val = intel_uncore_read(&i915->uncore, BXT_P_CR_MC_BIOS_REQ_0_0_0);
mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
num_active_channels = hweight32(dram_channels);

if (mem_freq_khz * num_active_channels == 0) {
drm_info(&i915->drm,
"Couldn't get system memory bandwidth\n");
return -EINVAL;
}

/*
* Now read each DUNIT8/9/10/11 to check the rank of each dimms.
*/
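Both hunks above compute the memory frequency the same way: mask the request field out of the BIOS register, multiply by the platform multiplier (133333333 Hz on BXT, 266666666 Hz on SKL), and divide by 1000 with round-up to get kHz. A small self-contained sketch of that arithmetic, using an assumed raw register value of 12:

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)		(((n) + (d) - 1) / (d))
#define BXT_REQ_DATA_MASK		0x3F
#define BXT_MEMORY_FREQ_MULTIPLIER_HZ	133333333ULL

int main(void)
{
	uint32_t val = 12;	/* assumed raw BXT_P_CR_MC_BIOS_REQ_0_0_0 value */
	uint64_t mem_freq_khz;

	mem_freq_khz = DIV_ROUND_UP((uint64_t)(val & BXT_REQ_DATA_MASK) *
				    BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

	/* 12 * 133333333 Hz = 1599999996 Hz -> 1600000 kHz (~1600 MHz) */
	printf("mem_freq_khz = %llu\n", (unsigned long long)mem_freq_khz);
	return 0;
}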
@@ -590,14 +590,14 @@ static const struct drm_display_mode k101_im2byl02_default_mode = {
.clock = 69700,

.hdisplay = 800,
.hsync_start = 800 + 6,
.hsync_end = 800 + 6 + 15,
.htotal = 800 + 6 + 15 + 16,
.hsync_start = 800 + 52,
.hsync_end = 800 + 52 + 8,
.htotal = 800 + 52 + 8 + 48,

.vdisplay = 1280,
.vsync_start = 1280 + 8,
.vsync_end = 1280 + 8 + 48,
.vtotal = 1280 + 8 + 48 + 52,
.vsync_start = 1280 + 16,
.vsync_end = 1280 + 16 + 6,
.vtotal = 1280 + 16 + 6 + 15,

.width_mm = 135,
.height_mm = 217,
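As a sanity check on the retimed k101_im2byl02 mode above: with the usual estimate refresh ≈ clock [kHz] × 1000 / (htotal × vtotal), the old timings (htotal 837, vtotal 1388) give roughly 60.0 Hz at the 69.7 MHz pixel clock, while the new timings (htotal 908, vtotal 1317) give roughly 58.3 Hz. A tiny worked example:

#include <stdio.h>

int main(void)
{
	unsigned int clock_khz = 69700;
	unsigned int htotal = 800 + 52 + 8 + 48;	/* 908, new timings */
	unsigned int vtotal = 1280 + 16 + 6 + 15;	/* 1317, new timings */

	/* Essentially the formula drm_mode_vrefresh() uses, in mHz here. */
	unsigned long long mhz = 1000ULL * clock_khz * 1000 / (htotal * vtotal);

	printf("refresh ~= %llu.%03llu Hz\n", mhz / 1000, mhz % 1000);
	return 0;
}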
@@ -535,13 +535,19 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
return 0;

vc4_hdmi = encoder_to_vc4_hdmi(encoder);
WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
if (ret)
return ret;

ret = vc4_crtc_disable(crtc, encoder, NULL, channel);
if (ret)
return ret;

pm_runtime_put(&vc4_hdmi->pdev->dev);
ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
if (ret)
return ret;

return ret;
return 0;
}

static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
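The hunk above replaces a WARN_ON() around pm_runtime_resume_and_get() with real error propagation. A minimal sketch of that idiom for a generic struct device follows; note that pm_runtime_resume_and_get() already drops the usage count it took when it fails, so the error path needs no matching put.

#include <linux/pm_runtime.h>

static int example_do_work(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* usage count already dropped on failure */

	/* ... touch the hardware while it is guaranteed to be powered ... */

	pm_runtime_put(dev);
	return 0;
}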
@@ -728,14 +734,14 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
struct drm_crtc *crtc = &vc4_crtc->base;
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
u32 chan = vc4_state->assigned_channel;
u32 chan = vc4_crtc->current_hvs_channel;
unsigned long flags;

spin_lock_irqsave(&dev->event_lock, flags);
spin_lock(&vc4_crtc->irq_lock);
if (vc4_crtc->event &&
(vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)) ||
vc4_state->feed_txp)) {
(vc4_crtc->current_dlist == HVS_READ(SCALER_DISPLACTX(chan)) ||
vc4_crtc->feeds_txp)) {
drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
vc4_crtc->event = NULL;
drm_crtc_vblank_put(crtc);
@@ -748,6 +754,7 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
*/
vc4_hvs_unmask_underrun(dev, chan);
}
spin_unlock(&vc4_crtc->irq_lock);
spin_unlock_irqrestore(&dev->event_lock, flags);
}

@@ -913,7 +920,6 @@ struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc)
return NULL;

old_vc4_state = to_vc4_crtc_state(crtc->state);
vc4_state->feed_txp = old_vc4_state->feed_txp;
vc4_state->margins = old_vc4_state->margins;
vc4_state->assigned_channel = old_vc4_state->assigned_channel;

@@ -974,6 +980,7 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
.mode_valid = vc4_crtc_mode_valid,
.atomic_check = vc4_crtc_atomic_check,
.atomic_begin = vc4_hvs_atomic_begin,
.atomic_flush = vc4_hvs_atomic_flush,
.atomic_enable = vc4_crtc_atomic_enable,
.atomic_disable = vc4_crtc_atomic_disable,
@@ -1148,6 +1155,7 @@ int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
return PTR_ERR(primary_plane);
}

spin_lock_init(&vc4_crtc->irq_lock);
drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
crtc_funcs, NULL);
drm_crtc_helper_add(crtc, crtc_helper_funcs);
@@ -497,6 +497,33 @@ struct vc4_crtc {
struct drm_pending_vblank_event *event;

struct debugfs_regset32 regset;

/**
* @feeds_txp: True if the CRTC feeds our writeback controller.
*/
bool feeds_txp;

/**
* @irq_lock: Spinlock protecting the resources shared between
* the atomic code and our vblank handler.
*/
spinlock_t irq_lock;

/**
* @current_dlist: Start offset of the display list currently
* set in the HVS for that CRTC. Protected by @irq_lock, and
* copied in vc4_hvs_update_dlist() for the CRTC interrupt
* handler to have access to that value.
*/
unsigned int current_dlist;

/**
* @current_hvs_channel: HVS channel currently assigned to the
* CRTC. Protected by @irq_lock, and copied in
* vc4_hvs_atomic_begin() for the CRTC interrupt handler to have
* access to that value.
*/
unsigned int current_hvs_channel;
};

static inline struct vc4_crtc *
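The kernel-doc above describes a snapshot scheme: the atomic path copies the values the vblank interrupt handler needs (current_dlist, current_hvs_channel) under @irq_lock, so the handler never chases CRTC state that may already have been swapped. A stripped-down sketch of that producer/consumer pairing, using hypothetical names rather than the driver's:

#include <linux/spinlock.h>

struct example_crtc {
	spinlock_t irq_lock;
	unsigned int current_dlist;		/* protected by irq_lock */
	unsigned int current_hvs_channel;	/* protected by irq_lock */
};

/* Atomic path (process context): publish the new values. */
static void example_commit(struct example_crtc *c, unsigned int dlist,
			   unsigned int channel)
{
	unsigned long flags;

	spin_lock_irqsave(&c->irq_lock, flags);
	c->current_dlist = dlist;
	c->current_hvs_channel = channel;
	spin_unlock_irqrestore(&c->irq_lock, flags);
}

/* Interrupt handler: read a consistent snapshot. */
static void example_irq(struct example_crtc *c)
{
	unsigned int dlist, channel;

	spin_lock(&c->irq_lock);
	dlist = c->current_dlist;
	channel = c->current_hvs_channel;
	spin_unlock(&c->irq_lock);

	/* ... compare dlist/channel against the hardware state ... */
	(void)dlist;
	(void)channel;
}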
@@ -526,7 +553,6 @@ struct vc4_crtc_state {
struct drm_crtc_state base;
/* Dlist area for this CRTC configuration. */
struct drm_mm_node mm;
bool feed_txp;
bool txp_armed;
unsigned int assigned_channel;

@@ -918,6 +944,7 @@ extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_stop_channel(struct drm_device *dev, unsigned int output);
int vc4_hvs_get_fifo_from_output(struct drm_device *dev, unsigned int output);
int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state);
[File diff suppressed because it is too large]
@@ -97,9 +97,6 @@ struct vc4_hdmi_variant {
/* Callback to disable the RNG in the PHY */
void (*phy_rng_disable)(struct vc4_hdmi *vc4_hdmi);

/* Callback to calculate hsm clock */
u32 (*calc_hsm_clock)(struct vc4_hdmi *vc4_hdmi, unsigned long pixel_rate);

/* Callback to get channel map */
u32 (*channel_map)(struct vc4_hdmi *vc4_hdmi, u32 channel_mask);

@@ -180,12 +177,46 @@ struct vc4_hdmi {

struct reset_control *reset;

struct clk_request *bvb_req;
struct clk_request *hsm_req;

/* Common debugfs regset */
struct debugfs_regset32 hdmi_regset;
struct debugfs_regset32 hd_regset;

/**
* @hw_lock: Spinlock protecting device register access.
*/
spinlock_t hw_lock;

/**
* @mutex: Mutex protecting the driver access across multiple
* frameworks (KMS, ALSA).
*
* NOTE: While supported, CEC has been left out since
* cec_s_phys_addr_from_edid() might call .adap_enable and lead to a
* reentrancy issue between .get_modes (or .detect) and .adap_enable.
* Since we don't share any state between the CEC hooks and KMS', it's
* not a big deal. The only trouble might come from updating the CEC
* clock divider which might be affected by a modeset, but CEC should
* be resilient to that.
*/
struct mutex mutex;

/**
* @saved_adjusted_mode: Copy of @drm_crtc_state.adjusted_mode
* for use by ALSA hooks and interrupt handlers. Protected by @mutex.
*/
struct drm_display_mode saved_adjusted_mode;

/**
* @output_enabled: Is the HDMI controller currently active?
* Protected by @mutex.
*/
bool output_enabled;

/**
* @scdc_enabled: Is the HDMI controller currently running with
* the scrambler on? Protected by @mutex.
*/
bool scdc_enabled;

/* VC5 debugfs regset */
struct debugfs_regset32 cec_regset;
struct debugfs_regset32 csc_regset;
@@ -130,31 +130,49 @@
void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
struct vc4_hdmi_connector_state *conn_state)
{
unsigned long flags;

/* PHY should be in reset, like
* vc4_hdmi_encoder_disable() does.
*/

spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);

HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0xf << 16);
HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0);

spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}

void vc4_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi)
{
unsigned long flags;

spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0xf << 16);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}

void vc4_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi)
{
unsigned long flags;

spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_TX_PHY_CTL_0,
HDMI_READ(HDMI_TX_PHY_CTL_0) &
~VC4_HDMI_TX_PHY_RNG_PWRDN);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}

void vc4_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi)
{
unsigned long flags;

spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_TX_PHY_CTL_0,
HDMI_READ(HDMI_TX_PHY_CTL_0) |
VC4_HDMI_TX_PHY_RNG_PWRDN);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}

static unsigned long long
@@ -336,6 +354,8 @@ phy_get_channel_settings(enum vc4_hdmi_phy_channel chan,

static void vc5_hdmi_reset_phy(struct vc4_hdmi *vc4_hdmi)
{
lockdep_assert_held(&vc4_hdmi->hw_lock);

HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0x0f);
HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, BIT(10));
}
@@ -348,10 +368,13 @@ void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
unsigned long long pixel_freq = conn_state->pixel_rate;
unsigned long long vco_freq;
unsigned char word_sel;
unsigned long flags;
u8 vco_sel, vco_div;

vco_freq = phy_get_vco_freq(pixel_freq, &vco_sel, &vco_div);

spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);

vc5_hdmi_reset_phy(vc4_hdmi);

HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL,
@@ -501,23 +524,37 @@ void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
HDMI_READ(HDMI_TX_PHY_RESET_CTL) |
VC4_HDMI_TX_PHY_RESET_CTL_PLL_RESETB |
VC4_HDMI_TX_PHY_RESET_CTL_PLLDIV_RESETB);

spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}

void vc5_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi)
{
unsigned long flags;

spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
vc5_hdmi_reset_phy(vc4_hdmi);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}

void vc5_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi)
{
unsigned long flags;

spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL,
HDMI_READ(HDMI_TX_PHY_POWERDOWN_CTL) &
~VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}

void vc5_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi)
{
unsigned long flags;

spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL,
HDMI_READ(HDMI_TX_PHY_POWERDOWN_CTL) |
VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
@@ -445,6 +445,8 @@ static inline void vc4_hdmi_write(struct vc4_hdmi *hdmi,
const struct vc4_hdmi_variant *variant = hdmi->variant;
void __iomem *base;

lockdep_assert_held(&hdmi->hw_lock);

WARN_ON(!pm_runtime_active(&hdmi->pdev->dev));

if (reg >= variant->num_registers) {
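The lockdep_assert_held() added above turns the "callers hold hw_lock" rule documented in struct vc4_hdmi into something lockdep can verify at runtime (when lock debugging is enabled). A minimal sketch of the idiom, with hypothetical names:

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct example_hdmi {
	spinlock_t hw_lock;
	u32 shadow_reg;
};

/* Low-level helper: only valid with hw_lock held. */
static void example_write(struct example_hdmi *hdmi, u32 val)
{
	lockdep_assert_held(&hdmi->hw_lock);	/* splat if the rule is broken */
	hdmi->shadow_reg = val;
}

static void example_update(struct example_hdmi *hdmi, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&hdmi->hw_lock, flags);
	example_write(hdmi, val);
	spin_unlock_irqrestore(&hdmi->hw_lock, flags);
}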
@@ -404,17 +404,16 @@ static void vc4_hvs_update_dlist(struct drm_crtc *crtc)
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
unsigned long flags;

if (crtc->state->event) {
unsigned long flags;

crtc->state->event->pipe = drm_crtc_index(crtc);

WARN_ON(drm_crtc_vblank_get(crtc) != 0);

spin_lock_irqsave(&dev->event_lock, flags);

if (!vc4_state->feed_txp || vc4_state->txp_armed) {
if (!vc4_crtc->feeds_txp || vc4_state->txp_armed) {
vc4_crtc->event = crtc->state->event;
crtc->state->event = NULL;
}
@@ -427,6 +426,22 @@ static void vc4_hvs_update_dlist(struct drm_crtc *crtc)
HVS_WRITE(SCALER_DISPLISTX(vc4_state->assigned_channel),
vc4_state->mm.start);
}

spin_lock_irqsave(&vc4_crtc->irq_lock, flags);
vc4_crtc->current_dlist = vc4_state->mm.start;
spin_unlock_irqrestore(&vc4_crtc->irq_lock, flags);
}

void vc4_hvs_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
unsigned long flags;

spin_lock_irqsave(&vc4_crtc->irq_lock, flags);
vc4_crtc->current_hvs_channel = vc4_state->assigned_channel;
spin_unlock_irqrestore(&vc4_crtc->irq_lock, flags);
}

void vc4_hvs_atomic_enable(struct drm_crtc *crtc,
@@ -434,10 +449,9 @@ void vc4_hvs_atomic_enable(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(new_crtc_state);
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
bool oneshot = vc4_state->feed_txp;
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
bool oneshot = vc4_crtc->feeds_txp;

vc4_hvs_update_dlist(crtc);
vc4_hvs_init_channel(vc4, crtc, mode, oneshot);
@@ -238,6 +238,7 @@ static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
unsigned int i;

for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
u32 dispctrl;
u32 dsp3_mux;
@@ -258,7 +259,7 @@ static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
* TXP IP, and we need to disable the FIFO2 -> pixelvalve1
* route.
*/
if (vc4_state->feed_txp)
if (vc4_crtc->feeds_txp)
dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
else
dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);
@@ -368,20 +369,6 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
}

if (vc4->hvs && vc4->hvs->hvs5) {
unsigned long core_rate = max_t(unsigned long,
500000000,
new_hvs_state->core_clock_rate);

core_req = clk_request_start(hvs->core_clk, core_rate);

/*
* And remove the previous one based on the HVS
* requirements if any.
*/
clk_request_done(hvs->core_req);
}

for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
struct vc4_crtc_state *vc4_crtc_state =
to_vc4_crtc_state(old_crtc_state);
@@ -404,6 +391,26 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
drm_err(dev, "Timed out waiting for commit\n");
}

if (vc4->hvs && vc4->hvs->hvs5) {
unsigned long core_rate = max_t(unsigned long,
500000000,
new_hvs_state->core_clock_rate);

drm_dbg(dev, "Raising the core clock at %lu Hz\n", core_rate);

/*
* Do a temporary request on the core clock during the
* modeset.
*/
core_req = clk_request_start(hvs->core_clk, core_rate);

/*
* And remove the previous one based on the HVS
* requirements if any.
*/
clk_request_done(hvs->core_req);
}

drm_atomic_helper_commit_modeset_disables(dev, state);

vc4_ctm_commit(vc4, state);
@@ -391,7 +391,6 @@ static int vc4_txp_atomic_check(struct drm_crtc *crtc,
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
int ret;

ret = vc4_hvs_atomic_check(crtc, state);
@@ -399,7 +398,6 @@ static int vc4_txp_atomic_check(struct drm_crtc *crtc,
return ret;

crtc_state->no_vblank = true;
vc4_state->feed_txp = true;

return 0;
}
@@ -437,6 +435,7 @@ static void vc4_txp_atomic_disable(struct drm_crtc *crtc,

static const struct drm_crtc_helper_funcs vc4_txp_crtc_helper_funcs = {
.atomic_check = vc4_txp_atomic_check,
.atomic_begin = vc4_hvs_atomic_begin,
.atomic_flush = vc4_hvs_atomic_flush,
.atomic_enable = vc4_txp_atomic_enable,
.atomic_disable = vc4_txp_atomic_disable,
@@ -482,6 +481,7 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)

vc4_crtc->pdev = pdev;
vc4_crtc->data = &vc4_txp_crtc_data;
vc4_crtc->feeds_txp = true;

txp->pdev = pdev;
@@ -13,6 +13,7 @@
#define _HYPERV_VMBUS_H

#include <linux/list.h>
#include <linux/bitops.h>
#include <asm/sync_bitops.h>
#include <asm/hyperv-tlfs.h>
#include <linux/atomic.h>
@@ -71,8 +71,6 @@
#define TCOBASE(p) ((p)->tco_res->start)
/* SMI Control and Enable Register */
#define SMI_EN(p) ((p)->smi_res->start)
#define TCO_EN (1 << 13)
#define GBL_SMI_EN (1 << 0)

#define TCO_RLD(p) (TCOBASE(p) + 0x00) /* TCO Timer Reload/Curr. Value */
#define TCOv1_TMR(p) (TCOBASE(p) + 0x01) /* TCOv1 Timer Initial Value*/
@@ -357,12 +355,8 @@ static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t)

tmrval = seconds_to_ticks(p, t);

/*
* If TCO SMIs are off, the timer counts down twice before rebooting.
* Otherwise, the BIOS generally reboots when the SMI triggers.
*/
if (p->smi_res &&
(inl(SMI_EN(p)) & (TCO_EN | GBL_SMI_EN)) != (TCO_EN | GBL_SMI_EN))
/* For TCO v1 the timer counts down twice before rebooting */
if (p->iTCO_version == 1)
tmrval /= 2;

/* from the specs: */
@@ -527,7 +521,7 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
* Disables TCO logic generating an SMI#
*/
val32 = inl(SMI_EN(p));
val32 &= ~TCO_EN; /* Turn off SMI clearing watchdog */
val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
outl(val32, SMI_EN(p));
}
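The probe hunk above swaps the symbolic ~TCO_EN for the literal 0xffffdfff, which the earlier hunk appears to make necessary by dropping the TCO_EN define. The two masks are identical, since TCO_EN is bit 13; a quick stand-alone check:

#include <stdio.h>
#include <stdint.h>

#define TCO_EN (1 << 13)	/* 0x2000 */

int main(void)
{
	uint32_t sym = ~(uint32_t)TCO_EN;

	/* prints "0xffffdfff == 0xffffdfff" */
	printf("0x%08x == 0x%08x\n", sym, 0xffffdfffu);
	return 0;
}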
@@ -119,7 +119,7 @@ static int ixp4xx_wdt_probe(struct platform_device *pdev)
iwdt = devm_kzalloc(dev, sizeof(*iwdt), GFP_KERNEL);
if (!iwdt)
return -ENOMEM;
iwdt->base = dev->platform_data;
iwdt->base = (void __iomem *)dev->platform_data;

/*
* Retrieve rate from a fixed clock from the device tree if
@@ -268,8 +268,12 @@ static int omap_wdt_probe(struct platform_device *pdev)
wdev->wdog.bootstatus = WDIOF_CARDRESET;
}

if (!early_enable)
if (early_enable) {
omap_wdt_start(&wdev->wdog);
set_bit(WDOG_HW_RUNNING, &wdev->wdog.status);
} else {
omap_wdt_disable(wdev);
}

ret = watchdog_register_device(&wdev->wdog);
if (ret) {
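With early_enable set, the probe path now starts the timer and flags it with WDOG_HW_RUNNING, so the watchdog core keeps feeding the already-running hardware until user space opens the device. A hedged sketch of that probe-time pattern with a placeholder watchdog_device:

#include <linux/bitops.h>
#include <linux/watchdog.h>

static int example_wdt_probe_tail(struct watchdog_device *wdd, bool early_enable)
{
	if (early_enable) {
		int err = wdd->ops->start(wdd);

		if (err)
			return err;
		/* Hardware is ticking before user space attaches, so tell
		 * the core to keep it fed in the meantime.
		 */
		set_bit(WDOG_HW_RUNNING, &wdd->status);
	}

	return watchdog_register_device(wdd);
}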
@@ -130,7 +130,7 @@ static u64 sbsa_gwdt_reg_read(struct sbsa_gwdt *gwdt)
if (gwdt->version == 0)
return readl(gwdt->control_base + SBSA_GWDT_WOR);
else
return readq(gwdt->control_base + SBSA_GWDT_WOR);
return lo_hi_readq(gwdt->control_base + SBSA_GWDT_WOR);
}

static void sbsa_gwdt_reg_write(u64 val, struct sbsa_gwdt *gwdt)
@@ -138,7 +138,7 @@ static void sbsa_gwdt_reg_write(u64 val, struct sbsa_gwdt *gwdt)
if (gwdt->version == 0)
writel((u32)val, gwdt->control_base + SBSA_GWDT_WOR);
else
writeq(val, gwdt->control_base + SBSA_GWDT_WOR);
lo_hi_writeq(val, gwdt->control_base + SBSA_GWDT_WOR);
}

/*
@@ -411,4 +411,3 @@ MODULE_AUTHOR("Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>");
MODULE_AUTHOR("Al Stone <al.stone@linaro.org>");
MODULE_AUTHOR("Timur Tabi <timur@codeaurora.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRV_NAME);
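lo_hi_readq()/lo_hi_writeq() come from include/linux/io-64-nonatomic-lo-hi.h and split the access into two 32-bit operations, low word first, presumably so the driver also builds and works where a native 64-bit MMIO accessor is unavailable (e.g. 32-bit targets). Roughly, the helpers behave like this simplified sketch (not copied from the header):

#include <linux/io.h>
#include <linux/types.h>

/* Simplified equivalent of lo_hi_readq(): two 32-bit reads, low word first. */
static u64 example_lo_hi_readq(const void __iomem *addr)
{
	u32 low = readl(addr);
	u32 high = readl(addr + 4);

	return low + ((u64)high << 32);
}

/* Simplified equivalent of lo_hi_writeq(): low word first, then high. */
static void example_lo_hi_writeq(u64 val, void __iomem *addr)
{
	writel((u32)val, addr);
	writel((u32)(val >> 32), addr + 4);
}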