
Yeah keep looking. You might actually find that buttoned dick T3Q

Raziel K. Crowe 2022-03-22 19:00:55 +05:00
parent 7390a54372
commit e754b31022
10 changed files with 167 additions and 33 deletions

View File

@@ -1145,7 +1145,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
if (ret)
return ret;
if (!dev->mode_config.allow_fb_modifiers) {
if (!dev->mode_config.allow_fb_modifiers && !adev->enable_virtual_display) {
drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
"GFX9+ requires FB check based on format modifier\n");
ret = check_tiling_flags_gfx6(rfb);

View File

@@ -297,12 +297,27 @@ int drm_object_property_set_value(struct drm_mode_object *obj,
}
EXPORT_SYMBOL(drm_object_property_set_value);
static int __drm_object_property_get_value(struct drm_mode_object *obj,
static int __drm_object_property_get_prop_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t *val)
{
int i;
for (i = 0; i < obj->properties->count; i++) {
if (obj->properties->properties[i] == property) {
*val = obj->properties->values[i];
return 0;
}
}
return -EINVAL;
}
static int __drm_object_property_get_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t *val)
{
/* read-only properties bypass atomic mechanism and still store
* their value in obj->properties->values[].. mostly to avoid
* having to deal w/ EDID and similar props in atomic paths:
@@ -311,15 +326,7 @@ static int __drm_object_property_get_value(struct drm_mode_object *obj,
*/
if (drm_drv_uses_atomic_modeset(property->dev) &&
!(property->flags & DRM_MODE_PROP_IMMUTABLE))
return drm_atomic_get_property(obj, property, val);
for (i = 0; i < obj->properties->count; i++) {
if (obj->properties->properties[i] == property) {
*val = obj->properties->values[i];
return 0;
}
}
return -EINVAL;
return __drm_object_property_get_prop_value(obj, property, val);
}
/**
@@ -348,6 +355,32 @@ int drm_object_property_get_value(struct drm_mode_object *obj,
}
EXPORT_SYMBOL(drm_object_property_get_value);
/**
* drm_object_property_get_default_value - retrieve the default value of a
* property when in atomic mode.
* @obj: drm mode object to get property value from
* @property: property to retrieve
* @val: storage for the property value
*
* This function retrieves the default state of the given property as passed in
* to drm_object_attach_property
*
* Only atomic drivers should call this function directly, as for non-atomic
* drivers it will return the current value.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_object_property_get_default_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t *val)
{
WARN_ON(!drm_drv_uses_atomic_modeset(property->dev));
return __drm_object_property_get_prop_value(obj, property, val);
}
EXPORT_SYMBOL(drm_object_property_get_default_value);
/* helper for getconnector and getproperties ioctls */
int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic,
uint32_t __user *prop_ptr,
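
As a usage note for the new export: a minimal sketch (not part of this commit; my_plane_reset_zpos and my_zpos_prop are hypothetical driver-side names, and the declaration is assumed to sit next to the other drm_object_property_* helpers in drm_mode_object.h) of an atomic driver reading back the default it originally passed to drm_object_attach_property():

#include <drm/drm_mode_object.h>
#include <drm/drm_plane.h>
#include <drm/drm_property.h>

/* Returns the default zpos attached for this plane, or 0 if the property
 * was never attached to this object. On non-atomic drivers the helper
 * simply returns the current value instead of the default. */
static unsigned int my_plane_reset_zpos(struct drm_plane *plane,
                                        struct drm_property *my_zpos_prop)
{
        uint64_t def = 0;

        if (drm_object_property_get_default_value(&plane->base,
                                                   my_zpos_prop, &def))
                return 0;

        return (unsigned int)def;
}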

View File

@@ -1658,7 +1658,7 @@ static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
}
}
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
void intel_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane *plane)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -13217,6 +13217,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
vlv_wm_sanitize(dev_priv);
} else if (DISPLAY_VER(dev_priv) >= 9) {
skl_wm_get_hw_state(dev_priv);
skl_wm_sanitize(dev_priv);
} else if (HAS_PCH_SPLIT(dev_priv)) {
ilk_wm_get_hw_state(dev_priv);
}

View File

@@ -629,6 +629,8 @@ void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state);
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state);
void intel_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane *plane);
unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
int color_plane);

View File

@@ -6681,6 +6681,74 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices;
}
static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
{
const struct intel_dbuf_state *dbuf_state =
to_intel_dbuf_state(i915->dbuf.obj.state);
struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
struct intel_crtc *crtc;
for_each_intel_crtc(&i915->drm, crtc) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
entries[crtc->pipe] = crtc_state->wm.skl.ddb;
}
for_each_intel_crtc(&i915->drm, crtc) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
u8 slices;
slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
dbuf_state->joined_mbus);
if (dbuf_state->slices[crtc->pipe] & ~slices)
return true;
if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
I915_MAX_PIPES, crtc->pipe))
return true;
}
return false;
}
void skl_wm_sanitize(struct drm_i915_private *i915)
{
struct intel_crtc *crtc;
/*
* On TGL/RKL (at least) the BIOS likes to assign the planes
* to the wrong DBUF slices. This will cause an infinite loop
* in skl_commit_modeset_enables() as it can't find a way to
* transition between the old bogus DBUF layout to the new
* proper DBUF layout without DBUF allocation overlaps between
* the planes (which cannot be allowed or else the hardware
* may hang). If we detect a bogus DBUF layout just turn off
* all the planes so that skl_commit_modeset_enables() can
* simply ignore them.
*/
if (!skl_dbuf_is_misconfigured(i915))
return;
drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
if (plane_state->uapi.visible)
intel_plane_disable_noatomic(crtc, plane);
drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
}
}
static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;

View File

@@ -48,6 +48,7 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
struct skl_pipe_wm *out);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
void skl_wm_sanitize(struct drm_i915_private *dev_priv);
bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
const struct intel_bw_state *bw_state);
void intel_sagv_pre_plane_update(struct intel_atomic_state *state);

View File

@@ -83,6 +83,7 @@ config DRM_PANEL_SIMPLE
depends on PM
select VIDEOMODE_HELPERS
select DRM_DP_AUX_BUS
select DRM_DP_HELPER
help
DRM panel driver for dumb panels that need at most a regulator and
a GPIO to be powered up. Optionally a backlight can be attached so

View File

@@ -854,6 +854,8 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
bool debug_dump_regs = false;
bool enable_bg_fill = false;
u32 __iomem *dlist_start, *dlist_next;
unsigned int zpos = 0;
bool found = false;
if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
return;
@@ -867,7 +869,13 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
dlist_next = dlist_start;
/* Copy all the active planes' dlist contents to the hardware dlist. */
do {
found = false;
drm_atomic_crtc_for_each_plane(plane, crtc) {
if (plane->state->normalized_zpos != zpos)
continue;
/* Is this the first active plane? */
if (dlist_next == dlist_start) {
/* We need to enable background fill when a plane
@@ -883,8 +891,13 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
}
dlist_next += vc4_plane_write_dlist(plane, dlist_next);
found = true;
}
zpos++;
} while (found);
writel(SCALER_CTL0_END, dlist_next);
dlist_next++;
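
Pieced together from the three hunks above, the reworked flush now walks normalized zpos levels from 0 upward and keeps looping until a full pass finds no plane at the current level; a condensed sketch of just that iteration (background-fill handling and the terminating writel omitted):

unsigned int zpos = 0;
bool found;

do {
        found = false;

        drm_atomic_crtc_for_each_plane(plane, crtc) {
                /* Only emit planes sitting at the current zpos level. */
                if (plane->state->normalized_zpos != zpos)
                        continue;

                dlist_next += vc4_plane_write_dlist(plane, dlist_next);
                found = true;
        }

        zpos++;
} while (found);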

View File

@@ -1042,7 +1042,6 @@ int vc4_kms_load(struct drm_device *dev)
dev->mode_config.helper_private = &vc4_mode_config_helpers;
dev->mode_config.preferred_depth = 24;
dev->mode_config.async_page_flip = true;
if (vc4->firmware_kms)
dev->mode_config.normalize_zpos = true;
ret = vc4_ctm_obj_init(vc4);

View File

@@ -1572,9 +1572,14 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
DRM_COLOR_YCBCR_BT709,
DRM_COLOR_YCBCR_LIMITED_RANGE);
if (type == DRM_PLANE_TYPE_PRIMARY)
drm_plane_create_zpos_immutable_property(plane, 0);
return plane;
}
#define VC4_NUM_OVERLAY_PLANES 16
int vc4_plane_create_additional_planes(struct drm_device *drm)
{
struct drm_plane *cursor_plane;
@@ -1590,7 +1595,7 @@ int vc4_plane_create_additional_planes(struct drm_device *drm)
* modest number of planes to expose, that should hopefully
* still cover any sane usecase.
*/
for (i = 0; i < 16; i++) {
for (i = 0; i < VC4_NUM_OVERLAY_PLANES; i++) {
struct drm_plane *plane =
vc4_plane_init(drm, DRM_PLANE_TYPE_OVERLAY);
@@ -1599,17 +1604,28 @@ int vc4_plane_create_additional_planes(struct drm_device *drm)
plane->possible_crtcs =
GENMASK(drm->mode_config.num_crtc - 1, 0);
/* Create zpos property. Max of all the overlays + 1 primary +
* 1 cursor plane on a crtc.
*/
drm_plane_create_zpos_property(plane, i + 1, 1,
VC4_NUM_OVERLAY_PLANES + 1);
}
drm_for_each_crtc(crtc, drm) {
/* Set up the legacy cursor after overlay initialization,
* since we overlay planes on the CRTC in the order they were
* initialized.
* since the zpos fallback is that planes are rendered by plane
* ID order, and that then puts the cursor on top.
*/
cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
if (!IS_ERR(cursor_plane)) {
cursor_plane->possible_crtcs = drm_crtc_mask(crtc);
crtc->cursor = cursor_plane;
drm_plane_create_zpos_property(cursor_plane,
VC4_NUM_OVERLAY_PLANES + 1,
1,
VC4_NUM_OVERLAY_PLANES + 1);
}
}
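
For reference, the zpos ranges created above work out as follows with VC4_NUM_OVERLAY_PLANES = 16 (a derived summary, not text from the commit):

/* primary plane:   zpos 0, immutable
 * overlay plane i: default zpos i + 1, adjustable within [1, 17]
 * cursor plane:    default zpos 17,    adjustable within [1, 17]
 */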