Commit c0f6f503 authored by Liu Ying, committed by Leonard Crestez

MLK-21378-5 gpu: imx: Add dpu KMS support


Fast-forward dpu KMS driver from imx_4.14.y.

Signed-off-by: Liu Ying <victor.liu@nxp.com>
parent eae08b6e
drivers/gpu/drm/imx/Kconfig
@@ -65,3 +65,4 @@ config DRM_IMX_SEC_DSIM
 source "drivers/gpu/drm/imx/lcdif/Kconfig"
 source "drivers/gpu/drm/imx/dcss/Kconfig"
 source "drivers/gpu/drm/imx/hdp/Kconfig"
+source "drivers/gpu/drm/imx/dpu/Kconfig"
drivers/gpu/drm/imx/Makefile
@@ -16,3 +16,4 @@ obj-$(CONFIG_DRM_IMX_SEC_DSIM) += sec_mipi_dsim-imx.o
 obj-$(CONFIG_DRM_IMX_LCDIF) += lcdif/
 obj-$(CONFIG_DRM_IMX_DCSS) += dcss/
 obj-$(CONFIG_DRM_IMX_HDP) += hdp/
+obj-$(CONFIG_DRM_IMX_DPU) += dpu/
drivers/gpu/drm/imx/dpu/Kconfig
config DRM_IMX_DPU
tristate
depends on DRM_IMX
depends on IMX_DPU_CORE
default y if DRM_IMX=y
default m if DRM_IMX=m
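	help
	  DRM/KMS driver support for the Display Processing Unit (DPU)
	  found in i.MX8 SoCs.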
drivers/gpu/drm/imx/dpu/Makefile
ccflags-y += -Idrivers/gpu/drm/imx
imx-dpu-crtc-objs := dpu-crtc.o dpu-plane.o dpu-kms.o
obj-$(CONFIG_DRM_IMX_DPU) += imx-dpu-crtc.o
drivers/gpu/drm/imx/dpu/dpu-crtc.c
/*
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <video/dpu.h>
#include <video/imx8-pc.h>
#include "dpu-crtc.h"
#include "dpu-kms.h"
#include "dpu-plane.h"
#include "imx-drm.h"
static inline struct dpu_plane_state **
alloc_dpu_plane_states(struct dpu_crtc *dpu_crtc)
{
struct dpu_plane_state **states;
states = kcalloc(dpu_crtc->hw_plane_num, sizeof(*states), GFP_KERNEL);
if (!states)
return ERR_PTR(-ENOMEM);
return states;
}
struct dpu_plane_state **
crtc_state_get_dpu_plane_states(struct drm_crtc_state *state)
{
struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(state);
struct dpu_crtc_state *dcstate = to_dpu_crtc_state(imx_crtc_state);
return dcstate->dpu_plane_states;
}
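/* Find the other CRTC in the same CRTC group, i.e. the auxiliary stream. */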
static struct dpu_crtc *dpu_crtc_get_aux_dpu_crtc(struct dpu_crtc *dpu_crtc)
{
struct drm_crtc *crtc = &dpu_crtc->base, *tmp_crtc;
struct drm_device *dev = crtc->dev;
struct dpu_crtc *aux_dpu_crtc = NULL;
	drm_for_each_crtc(tmp_crtc, dev) {
		if (tmp_crtc == crtc)
			continue;
		/* only pick a CRTC from the same CRTC group */
		if (to_dpu_crtc(tmp_crtc)->crtc_grp_id ==
						dpu_crtc->crtc_grp_id) {
			aux_dpu_crtc = to_dpu_crtc(tmp_crtc);
			break;
		}
	}
BUG_ON(!aux_dpu_crtc);
return aux_dpu_crtc;
}
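/*
 * Bring up the display pipeline behind this CRTC.  When the pixel
 * combiner is in use, the master and the slave stream of the CRTC
 * group are enabled together (slave first), and the safety, content
 * and DEC shadow-load interrupts of both streams are used to confirm
 * that the initial configuration has been latched by the hardware.
 */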
static void dpu_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct dpu_crtc *aux_dpu_crtc = dpu_crtc_get_aux_dpu_crtc(dpu_crtc);
struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc->state);
struct dpu_crtc_state *dcstate = to_dpu_crtc_state(imx_crtc_state);
struct dpu_plane *dplane = to_dpu_plane(crtc->primary);
struct dpu_plane_res *res = &dplane->grp->res;
struct dpu_extdst *plane_ed = res->ed[dplane->stream_id];
struct dpu_extdst *aux_plane_ed = dpu_aux_ed_peek(plane_ed);
struct dpu_extdst *m_plane_ed = NULL, *s_plane_ed;
struct completion *shdld_done;
struct completion *m_safety_shdld_done, *s_safety_shdld_done;
struct completion *m_content_shdld_done, *s_content_shdld_done;
struct completion *m_dec_shdld_done, *s_dec_shdld_done;
unsigned long ret;
drm_crtc_vblank_on(crtc);
if (dcstate->use_pc) {
tcon_enable_pc(dpu_crtc->tcon);
if (extdst_is_master(plane_ed)) {
m_plane_ed = plane_ed;
s_plane_ed = aux_plane_ed;
} else {
m_plane_ed = aux_plane_ed;
s_plane_ed = plane_ed;
}
extdst_pixengcfg_syncmode_master(m_plane_ed, true);
extdst_pixengcfg_syncmode_master(s_plane_ed, false);
} else {
extdst_pixengcfg_syncmode_master(plane_ed, false);
}
enable_irq(dpu_crtc->safety_shdld_irq);
enable_irq(dpu_crtc->content_shdld_irq);
enable_irq(dpu_crtc->dec_shdld_irq);
if (dcstate->use_pc) {
enable_irq(aux_dpu_crtc->safety_shdld_irq);
enable_irq(aux_dpu_crtc->content_shdld_irq);
enable_irq(aux_dpu_crtc->dec_shdld_irq);
}
if (dcstate->use_pc) {
framegen_enable_clock(dpu_crtc->stream_id ?
dpu_crtc->aux_fg : dpu_crtc->fg);
extdst_pixengcfg_sync_trigger(m_plane_ed);
framegen_shdtokgen(dpu_crtc->m_fg);
		/* First turn on the slave stream, then the master stream. */
framegen_enable(dpu_crtc->s_fg);
framegen_enable(dpu_crtc->m_fg);
if (dpu_crtc->aux_is_master) {
m_safety_shdld_done = &aux_dpu_crtc->safety_shdld_done;
m_content_shdld_done = &aux_dpu_crtc->content_shdld_done;
m_dec_shdld_done = &aux_dpu_crtc->dec_shdld_done;
s_safety_shdld_done = &dpu_crtc->safety_shdld_done;
s_content_shdld_done = &dpu_crtc->content_shdld_done;
s_dec_shdld_done = &dpu_crtc->dec_shdld_done;
} else {
m_safety_shdld_done = &dpu_crtc->safety_shdld_done;
m_content_shdld_done = &dpu_crtc->content_shdld_done;
m_dec_shdld_done = &dpu_crtc->dec_shdld_done;
s_safety_shdld_done = &aux_dpu_crtc->safety_shdld_done;
s_content_shdld_done = &aux_dpu_crtc->content_shdld_done;
s_dec_shdld_done = &aux_dpu_crtc->dec_shdld_done;
}
ret = wait_for_completion_timeout(m_safety_shdld_done, HZ);
if (ret == 0)
dev_warn(dpu_crtc->dev,
"enable - wait for master safety shdld done timeout\n");
ret = wait_for_completion_timeout(m_content_shdld_done, HZ);
if (ret == 0)
dev_warn(dpu_crtc->dev,
"enable - wait for master content shdld done timeout\n");
ret = wait_for_completion_timeout(m_dec_shdld_done, HZ);
if (ret == 0)
dev_warn(dpu_crtc->dev,
"enable - wait for master dec shdld done timeout\n");
ret = wait_for_completion_timeout(s_safety_shdld_done, HZ);
if (ret == 0)
dev_warn(dpu_crtc->dev,
"enable - wait for slave safety shdld done timeout\n");
ret = wait_for_completion_timeout(s_content_shdld_done, HZ);
if (ret == 0)
dev_warn(dpu_crtc->dev,
"enable - wait for slave content shdld done timeout\n");
ret = wait_for_completion_timeout(s_dec_shdld_done, HZ);
if (ret == 0)
dev_warn(dpu_crtc->dev,
"enable - wait for slave DEC shdld done timeout\n");
} else {
framegen_enable_clock(dpu_crtc->fg);
extdst_pixengcfg_sync_trigger(plane_ed);
extdst_pixengcfg_sync_trigger(dpu_crtc->ed);
framegen_shdtokgen(dpu_crtc->fg);
framegen_enable(dpu_crtc->fg);
shdld_done = &dpu_crtc->safety_shdld_done;
ret = wait_for_completion_timeout(shdld_done, HZ);
if (ret == 0)
dev_warn(dpu_crtc->dev,
"enable - wait for safety shdld done timeout\n");
shdld_done = &dpu_crtc->content_shdld_done;
ret = wait_for_completion_timeout(shdld_done, HZ);
if (ret == 0)
dev_warn(dpu_crtc->dev,
"enable - wait for content shdld done timeout\n");
shdld_done = &dpu_crtc->dec_shdld_done;
ret = wait_for_completion_timeout(shdld_done, HZ);
if (ret == 0)
dev_warn(dpu_crtc->dev,
"enable - wait for dec shdld done timeout\n");
}
disable_irq(dpu_crtc->safety_shdld_irq);
disable_irq(dpu_crtc->content_shdld_irq);
disable_irq(dpu_crtc->dec_shdld_irq);
if (dcstate->use_pc) {
disable_irq(aux_dpu_crtc->safety_shdld_irq);
disable_irq(aux_dpu_crtc->content_shdld_irq);
disable_irq(aux_dpu_crtc->dec_shdld_irq);
}
if (crtc->state->event) {
spin_lock_irq(&crtc->dev->event_lock);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
spin_unlock_irq(&crtc->dev->event_lock);
crtc->state->event = NULL;
}
	/*
	 * TKT320590:
	 * Turn the TCON into operation mode later, after the first dumb
	 * frame is generated by the DPU.  This allows the DPR/PRG to evade
	 * that frame.  However, it turns out we have to set the TCON into
	 * operation mode first and then wait for the Framegen frame counter
	 * to move, otherwise the display pipeline is likely to break (if
	 * the pixel combiner is used, one of the two display streams
	 * sometimes cannot be set up correctly; if the pixel combiner is
	 * unused and the prefetch engine is used, the first atomic flush
	 * after the enablement is likely to fail - the content shadow load
	 * irq doesn't come).  This is a mysterious issue.
	 */
if (dcstate->use_pc) {
tcon_set_operation_mode(dpu_crtc->m_tcon);
tcon_set_operation_mode(dpu_crtc->s_tcon);
framegen_wait_for_frame_counter_moving(dpu_crtc->m_fg);
framegen_wait_for_frame_counter_moving(dpu_crtc->s_fg);
framegen_wait_for_secondary_syncup(dpu_crtc->m_fg);
framegen_wait_for_secondary_syncup(dpu_crtc->s_fg);
if (framegen_secondary_requests_to_read_empty_fifo(dpu_crtc->m_fg)) {
framegen_secondary_clear_channel_status(dpu_crtc->m_fg);
dev_warn(dpu_crtc->dev,
"enable - master FrameGen requests to read empty FIFO\n");
}
if (framegen_secondary_requests_to_read_empty_fifo(dpu_crtc->s_fg)) {
framegen_secondary_clear_channel_status(dpu_crtc->s_fg);
dev_warn(dpu_crtc->dev,
"enable - slave FrameGen requests to read empty FIFO\n");
}
} else {
tcon_set_operation_mode(dpu_crtc->tcon);
framegen_wait_for_frame_counter_moving(dpu_crtc->fg);
framegen_wait_for_secondary_syncup(dpu_crtc->fg);
if (framegen_secondary_requests_to_read_empty_fifo(dpu_crtc->fg)) {
framegen_secondary_clear_channel_status(dpu_crtc->fg);
dev_warn(dpu_crtc->dev,
"enable - FrameGen requests to read empty FIFO\n");
}
}
}
static void dpu_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct imx_crtc_state *imx_crtc_state =
to_imx_crtc_state(old_crtc_state);
struct dpu_crtc_state *dcstate = to_dpu_crtc_state(imx_crtc_state);
struct drm_display_mode *adjusted_mode = &old_crtc_state->adjusted_mode;
if (dcstate->use_pc) {
tcon_disable_pc(dpu_crtc->tcon);
		/* First turn off the master stream, then the slave stream. */
framegen_disable(dpu_crtc->m_fg);
framegen_disable(dpu_crtc->s_fg);
framegen_wait_done(dpu_crtc->m_fg, adjusted_mode);
framegen_wait_done(dpu_crtc->s_fg, adjusted_mode);
framegen_disable_clock(dpu_crtc->stream_id ?
dpu_crtc->aux_fg : dpu_crtc->fg);
} else {
framegen_disable(dpu_crtc->fg);
framegen_wait_done(dpu_crtc->fg, adjusted_mode);
framegen_disable_clock(dpu_crtc->fg);
}
WARN_ON(!crtc->state->event);
if (crtc->state->event) {
spin_lock_irq(&crtc->dev->event_lock);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
spin_unlock_irq(&crtc->dev->event_lock);
crtc->state->event = NULL;
}
drm_crtc_vblank_off(crtc);
}
static void dpu_drm_crtc_reset(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct imx_crtc_state *imx_crtc_state;
struct dpu_crtc_state *state;
if (crtc->state) {
__drm_atomic_helper_crtc_destroy_state(crtc->state);
imx_crtc_state = to_imx_crtc_state(crtc->state);
state = to_dpu_crtc_state(imx_crtc_state);
kfree(state->dpu_plane_states);
kfree(state);
crtc->state = NULL;
}
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state) {
		state->dpu_plane_states = alloc_dpu_plane_states(dpu_crtc);
		if (IS_ERR(state->dpu_plane_states)) {
			/* don't leave crtc->state pointing at freed memory */
			kfree(state);
			return;
		}
		crtc->state = &state->imx_crtc_state.base;
		crtc->state->crtc = crtc;
	}
}
static struct drm_crtc_state *
dpu_drm_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct imx_crtc_state *imx_crtc_state;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct dpu_crtc_state *state, *copy;
if (WARN_ON(!crtc->state))
return NULL;
copy = kzalloc(sizeof(*copy), GFP_KERNEL);
if (!copy)
return NULL;
copy->dpu_plane_states = alloc_dpu_plane_states(dpu_crtc);
if (IS_ERR(copy->dpu_plane_states)) {
kfree(copy);
return NULL;
}
__drm_atomic_helper_crtc_duplicate_state(crtc,
&copy->imx_crtc_state.base);
imx_crtc_state = to_imx_crtc_state(crtc->state);
state = to_dpu_crtc_state(imx_crtc_state);
copy->use_pc = state->use_pc;
return &copy->imx_crtc_state.base;
}
static void dpu_drm_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(state);
struct dpu_crtc_state *dcstate;
if (state) {
__drm_atomic_helper_crtc_destroy_state(state);
dcstate = to_dpu_crtc_state(imx_crtc_state);
kfree(dcstate->dpu_plane_states);
kfree(dcstate);
}
}
static int dpu_enable_vblank(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
enable_irq(dpu_crtc->vbl_irq);
return 0;
}
static void dpu_disable_vblank(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
disable_irq_nosync(dpu_crtc->vbl_irq);
}
static const struct drm_crtc_funcs dpu_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = drm_crtc_cleanup,
.page_flip = drm_atomic_helper_page_flip,
.reset = dpu_drm_crtc_reset,
.atomic_duplicate_state = dpu_drm_crtc_duplicate_state,
.atomic_destroy_state = dpu_drm_crtc_destroy_state,
.enable_vblank = dpu_enable_vblank,
.disable_vblank = dpu_disable_vblank,
};
static irqreturn_t dpu_vbl_irq_handler(int irq, void *dev_id)
{
struct dpu_crtc *dpu_crtc = dev_id;
drm_crtc_handle_vblank(&dpu_crtc->base);
return IRQ_HANDLED;
}
static irqreturn_t dpu_safety_shdld_irq_handler(int irq, void *dev_id)
{
struct dpu_crtc *dpu_crtc = dev_id;
complete(&dpu_crtc->safety_shdld_done);
return IRQ_HANDLED;
}
static irqreturn_t dpu_content_shdld_irq_handler(int irq, void *dev_id)
{
struct dpu_crtc *dpu_crtc = dev_id;
complete(&dpu_crtc->content_shdld_done);
return IRQ_HANDLED;
}
static irqreturn_t dpu_dec_shdld_irq_handler(int irq, void *dev_id)
{
struct dpu_crtc *dpu_crtc = dev_id;
complete(&dpu_crtc->dec_shdld_done);
return IRQ_HANDLED;
}
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
struct dpu_plane_state *dpstate;
struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
struct dpu_crtc_state *dcstate = to_dpu_crtc_state(imx_crtc_state);
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct videomode vm;
unsigned long encoder_types = 0;
u32 encoder_mask;
int i = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
encoder_mask = 1 << drm_encoder_index(encoder);
if (!(crtc_state->encoder_mask & encoder_mask))
continue;
encoder_types |= BIT(encoder->encoder_type);
}
if (crtc_state->enable && dcstate->use_pc) {
if (encoder_types & BIT(DRM_MODE_ENCODER_LVDS))
return -EINVAL;
if (encoder_types & BIT(DRM_MODE_ENCODER_DSI))
return -EINVAL;
drm_display_mode_to_videomode(mode, &vm);
if ((vm.hactive % 2) || (vm.hfront_porch % 2) ||
(vm.hsync_len % 2) || (vm.hback_porch % 2))
return -EINVAL;
}
/*
* cache the plane states so that the planes can be disabled in
* ->atomic_begin.
*/
drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
plane_state =
drm_atomic_get_plane_state(crtc_state->state, plane);
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
dpstate = to_dpu_plane_state(plane_state);
dcstate->dpu_plane_states[i++] = dpstate;
}
return 0;
}
static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct imx_crtc_state *imx_crtc_state =
to_imx_crtc_state(old_crtc_state);
struct dpu_crtc_state *old_dcstate = to_dpu_crtc_state(imx_crtc_state);
int i;
	/*
	 * Disable all planes' resources, in SHADOW registers only.
	 * Whether each of them is actually disabled or kept running
	 * depends on the commit of the new plane states.
	 */
for (i = 0; i < dpu_crtc->hw_plane_num; i++) {
struct dpu_plane_state *old_dpstate;
struct drm_plane_state *plane_state;
struct dpu_plane *dplane;
struct drm_plane *plane;
struct dpu_plane_res *res;
struct dpu_fetchunit *fu;
struct dpu_fetchunit *fe = NULL;
struct dpu_hscaler *hs = NULL;
struct dpu_vscaler *vs = NULL;
struct dpu_layerblend *lb;
struct dpu_extdst *ed;
extdst_src_sel_t ed_src;
dpu_block_id_t blend, source;
unsigned int stream_id;
int lb_id;
bool crtc_disabling_on_primary;
bool release_aux_source;
old_dpstate = old_dcstate->dpu_plane_states[i];
if (!old_dpstate)
continue;
plane_state = &old_dpstate->base;
dplane = to_dpu_plane(plane_state->plane);
res = &dplane->grp->res;
release_aux_source = false;
again:
crtc_disabling_on_primary = false;
if (old_dcstate->use_pc) {
if (release_aux_source) {
source = old_dpstate->aux_source;
blend = old_dpstate->aux_blend;
stream_id = 1;
} else {
source = old_dpstate->source;
blend = old_dpstate->blend;
stream_id = old_dpstate->left_src_w ? 0 : 1;
}
} else {
source = old_dpstate->source;
blend = old_dpstate->blend;
stream_id = dplane->stream_id;
}
fu = source_to_fu(res, source);
if (!fu)
return;
lb_id = blend_to_id(blend);
if (lb_id < 0)
return;
lb = res->lb[lb_id];
layerblend_pixengcfg_clken(lb, CLKEN__DISABLE);
if (fetchunit_is_fetchdecode(fu)) {
fe = fetchdecode_get_fetcheco(fu);
hs = fetchdecode_get_hscaler(fu);
vs = fetchdecode_get_vscaler(fu);
hscaler_pixengcfg_clken(hs, CLKEN__DISABLE);
vscaler_pixengcfg_clken(vs, CLKEN__DISABLE);
hscaler_mode(hs, SCALER_NEUTRAL);
vscaler_mode(vs, SCALER_NEUTRAL);
}
if (old_dpstate->is_top) {
ed = res->ed[stream_id];
ed_src = stream_id ?
ED_SRC_CONSTFRAME1 : ED_SRC_CONSTFRAME0;
extdst_pixengcfg_src_sel(ed, ed_src);
}
plane = old_dpstate->base.plane;
if (!crtc->state->enable &&
plane->type == DRM_PLANE_TYPE_PRIMARY)
crtc_disabling_on_primary = true;
if (crtc_disabling_on_primary && old_dpstate->use_prefetch) {
fu->ops->pin_off(fu);
if (fetchunit_is_fetchdecode(fu) &&
fe->ops->is_enabled(fe))
fe->ops->pin_off(fe);
} else {
fu->ops->disable_src_buf(fu);
fu->ops->unpin_off(fu);
if (fetchunit_is_fetchdecode(fu)) {
fetchdecode_pixengcfg_dynamic_src_sel(fu,
FD_SRC_DISABLE);
fe->ops->disable_src_buf(fe);
fe->ops->unpin_off(fe);
}
}
if (old_dpstate->need_aux_source && !release_aux_source) {
release_aux_source = true;
goto again;
}
}
}
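/*
 * For a plain flip (no modeset), trigger an ExtDst shadow load and wait
 * for the content shadow-load interrupt(s) before signalling the vblank
 * event; finally, detach fetch units which are no longer in use by any
 * stream.
 */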
static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc), *aux_dpu_crtc = NULL;
struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc->state);
struct imx_crtc_state *old_imx_crtc_state =
to_imx_crtc_state(old_crtc_state);
struct dpu_crtc_state *dcstate = to_dpu_crtc_state(imx_crtc_state);
struct dpu_crtc_state *old_dcstate =
to_dpu_crtc_state(old_imx_crtc_state);
struct dpu_plane *dplane = to_dpu_plane(crtc->primary);
struct dpu_plane_res *res = &dplane->grp->res;
struct dpu_extdst *ed = res->ed[dplane->stream_id], *aux_ed;
struct completion *shdld_done;
struct completion *m_content_shdld_done = NULL;
struct completion *s_content_shdld_done = NULL;
unsigned long ret;
int i;
bool need_modeset = drm_atomic_crtc_needs_modeset(crtc->state);
if (!crtc->state->active && !old_crtc_state->active)
return;
if (dcstate->use_pc) {
aux_dpu_crtc = dpu_crtc_get_aux_dpu_crtc(dpu_crtc);
if (dpu_crtc->aux_is_master) {
m_content_shdld_done = &aux_dpu_crtc->content_shdld_done;
s_content_shdld_done = &dpu_crtc->content_shdld_done;
} else {
m_content_shdld_done = &dpu_crtc->content_shdld_done;
s_content_shdld_done = &aux_dpu_crtc->content_shdld_done;
}
}
if (!need_modeset) {
enable_irq(dpu_crtc->content_shdld_irq);
if (dcstate->use_pc)
enable_irq(aux_dpu_crtc->content_shdld_irq);
if (dcstate->use_pc) {
if (extdst_is_master(ed)) {
extdst_pixengcfg_sync_trigger(ed);
} else {
aux_ed = dpu_aux_ed_peek(ed);
extdst_pixengcfg_sync_trigger(aux_ed);
}
} else {
extdst_pixengcfg_sync_trigger(ed);
}
if (dcstate->use_pc) {
shdld_done = m_content_shdld_done;
ret = wait_for_completion_timeout(shdld_done, HZ);
if (ret == 0)
dev_warn(dpu_crtc->dev,
"flush - wait for master content shdld done timeout\n");
shdld_done = s_content_shdld_done;
ret = wait_for_completion_timeout(shdld_done, HZ);
if (ret == 0)
dev_warn(dpu_crtc->dev,
"flush - wait for slave content shdld done timeout\n");
} else {
shdld_done = &dpu_crtc->content_shdld_done;
ret = wait_for_completion_timeout(shdld_done, HZ);
if (ret == 0)
dev_warn(dpu_crtc->dev,
"flush - wait for content shdld done timeout\n");
}
disable_irq(dpu_crtc->content_shdld_irq);
if (dcstate->use_pc)
disable_irq(aux_dpu_crtc->content_shdld_irq);
if (dcstate->use_pc) {
if (framegen_secondary_requests_to_read_empty_fifo(dpu_crtc->m_fg)) {
framegen_secondary_clear_channel_status(dpu_crtc->m_fg);
dev_warn(dpu_crtc->dev,
"flush - master FrameGen requests to read empty FIFO\n");
}
if (framegen_secondary_requests_to_read_empty_fifo(dpu_crtc->s_fg)) {
framegen_secondary_clear_channel_status(dpu_crtc->s_fg);
dev_warn(dpu_crtc->dev,
"flush - slave FrameGen requests to read empty FIFO\n");
}
} else {
if (framegen_secondary_requests_to_read_empty_fifo(dpu_crtc->fg)) {
framegen_secondary_clear_channel_status(dpu_crtc->fg);
dev_warn(dpu_crtc->dev,
"flush - FrameGen requests to read empty FIFO\n");
}
}
WARN_ON(!crtc->state->event);
if (crtc->state->event) {
spin_lock_irq(&crtc->dev->event_lock);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
spin_unlock_irq(&crtc->dev->event_lock);
crtc->state->event = NULL;
}
} else if (!crtc->state->active) {
if (old_dcstate->use_pc) {
if (extdst_is_master(ed)) {
extdst_pixengcfg_sync_trigger(ed);
} else {
aux_ed = dpu_aux_ed_peek(ed);
extdst_pixengcfg_sync_trigger(aux_ed);
}
} else {
extdst_pixengcfg_sync_trigger(ed);
}
}
for (i = 0; i < dpu_crtc->hw_plane_num; i++) {
struct dpu_plane_state *old_dpstate;
struct dpu_fetchunit *fu;
struct dpu_fetchunit *fe;
struct dpu_hscaler *hs;
struct dpu_vscaler *vs;
dpu_block_id_t source;
bool aux_source_disable;
old_dpstate = old_dcstate->dpu_plane_states[i];
if (!old_dpstate)
continue;
aux_source_disable = false;
again:
source = aux_source_disable ?
old_dpstate->aux_source : old_dpstate->source;
fu = source_to_fu(res, source);
if (!fu)
return;
if (!fu->ops->is_enabled(fu) || fu->ops->is_pinned_off(fu))
fu->ops->set_stream_id(fu, DPU_PLANE_SRC_DISABLED);
if (fetchunit_is_fetchdecode(fu)) {
fe = fetchdecode_get_fetcheco(fu);
if (!fe->ops->is_enabled(fe) ||
fe->ops->is_pinned_off(fe))
fe->ops->set_stream_id(fe,
DPU_PLANE_SRC_DISABLED);
hs = fetchdecode_get_hscaler(fu);
if (!hscaler_is_enabled(hs))
hscaler_set_stream_id(hs,
DPU_PLANE_SRC_DISABLED);
vs = fetchdecode_get_vscaler(fu);
if (!vscaler_is_enabled(vs))
vscaler_set_stream_id(vs,
DPU_PLANE_SRC_DISABLED);
}
if (old_dpstate->need_aux_source && !aux_source_disable) {
aux_source_disable = true;
goto again;
}
}
}
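/*
 * Program the per-mode configuration: FrameGen sync and video mode,
 * TCON timing and bus format, DisEngCfg polarities, ConstFrame
 * dimensions and the ExtDst source routing for both the content and
 * the safety stream.  When the pixel combiner is in use, run through
 * the sequence a second time to configure the auxiliary pipe.
 */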
static void dpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct dpu_crtc *aux_dpu_crtc = dpu_crtc_get_aux_dpu_crtc(dpu_crtc);
struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc->state);
struct dpu_crtc_state *dcstate = to_dpu_crtc_state(imx_crtc_state);
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
struct drm_encoder *encoder;
struct dpu_plane *dplane = to_dpu_plane(crtc->primary);
struct dpu_plane_res *res = &dplane->grp->res;
struct dpu_constframe *cf;
struct dpu_disengcfg *dec;
struct dpu_extdst *ed, *plane_ed;
struct dpu_framegen *fg;
struct dpu_tcon *tcon;
struct dpu_store *st;
extdst_src_sel_t ed_src;
unsigned long encoder_types = 0;
u32 encoder_mask;
unsigned int stream_id;
int crtc_hdisplay = dcstate->use_pc ?
(mode->crtc_hdisplay >> 1) : mode->crtc_hdisplay;
bool encoder_type_has_tmds = false;
bool encoder_type_has_lvds = false;
bool cfg_aux_pipe = false;
dev_dbg(dpu_crtc->dev, "%s: mode->hdisplay: %d\n", __func__,
mode->hdisplay);
dev_dbg(dpu_crtc->dev, "%s: mode->vdisplay: %d\n", __func__,
mode->vdisplay);
dev_dbg(dpu_crtc->dev, "%s: mode->clock: %dKHz\n", __func__,
mode->clock);
dev_dbg(dpu_crtc->dev, "%s: mode->vrefresh: %dHz\n", __func__,
drm_mode_vrefresh(mode));
if (dcstate->use_pc)
dev_dbg(dpu_crtc->dev, "%s: use pixel combiner\n", __func__);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
encoder_mask = 1 << drm_encoder_index(encoder);
if (!(crtc->state->encoder_mask & encoder_mask))
continue;
encoder_types |= BIT(encoder->encoder_type);
}
if (encoder_types & BIT(DRM_MODE_ENCODER_TMDS)) {
encoder_type_has_tmds = true;
dev_dbg(dpu_crtc->dev, "%s: encoder type has TMDS\n", __func__);
}
if (encoder_types & BIT(DRM_MODE_ENCODER_LVDS)) {
encoder_type_has_lvds = true;
dev_dbg(dpu_crtc->dev, "%s: encoder type has LVDS\n", __func__);
}
again:
if (cfg_aux_pipe) {
cf = dpu_crtc->aux_cf;
dec = dpu_crtc->aux_dec;
ed = dpu_crtc->aux_ed;
fg = dpu_crtc->aux_fg;
tcon = dpu_crtc->aux_tcon;
st = aux_dpu_crtc->st;
stream_id = dpu_crtc->stream_id ^ 1;
} else {
cf = dpu_crtc->cf;
dec = dpu_crtc->dec;
ed = dpu_crtc->ed;
fg = dpu_crtc->fg;
tcon = dpu_crtc->tcon;
st = dpu_crtc->st;
stream_id = dpu_crtc->stream_id;
}
if (dcstate->use_pc) {
store_pixengcfg_syncmode_fixup(st, true);
		framegen_syncmode_fixup(fg, !framegen_is_master(fg));
framegen_syncmode(fg, framegen_is_master(fg) ?
FGSYNCMODE__MASTER : FGSYNCMODE__SLAVE_ONCE);
} else {
store_pixengcfg_syncmode_fixup(st, false);
framegen_syncmode_fixup(fg, false);
framegen_syncmode(fg, FGSYNCMODE__OFF);
}
framegen_cfg_videomode(fg, mode, dcstate->use_pc,
encoder_type_has_tmds, encoder_type_has_lvds);
framegen_displaymode(fg, FGDM__SEC_ON_TOP);
framegen_panic_displaymode(fg, FGDM__TEST);
tcon_cfg_videomode(tcon, mode, dcstate->use_pc);
tcon_set_fmt(tcon, imx_crtc_state->bus_format);
if (dpu_crtc->has_pc)
tcon_configure_pc(tcon, stream_id, mode->crtc_hdisplay,
dcstate->use_pc ? PC_COMBINE : PC_BYPASS, 0);
disengcfg_polarity_ctrl(dec, mode->flags);
constframe_framedimensions(cf, crtc_hdisplay, mode->crtc_vdisplay);
ed_src = stream_id ? ED_SRC_CONSTFRAME5 : ED_SRC_CONSTFRAME4;
extdst_pixengcfg_src_sel(ed, ed_src);
plane_ed = res->ed[stream_id];
ed_src = stream_id ? ED_SRC_CONSTFRAME1 : ED_SRC_CONSTFRAME0;
extdst_pixengcfg_src_sel(plane_ed, ed_src);
if (dcstate->use_pc && !cfg_aux_pipe) {
cfg_aux_pipe = true;
goto again;
}
}
static const struct drm_crtc_helper_funcs dpu_helper_funcs = {
.mode_set_nofb = dpu_crtc_mode_set_nofb,
.atomic_check = dpu_crtc_atomic_check,
.atomic_begin = dpu_crtc_atomic_begin,
.atomic_flush = dpu_crtc_atomic_flush,
.atomic_enable = dpu_crtc_atomic_enable,
.atomic_disable = dpu_crtc_atomic_disable,
};
static void dpu_crtc_put_resources(struct dpu_crtc *dpu_crtc)
{
if (!IS_ERR_OR_NULL(dpu_crtc->cf))
dpu_cf_put(dpu_crtc->cf);
if (!IS_ERR_OR_NULL(dpu_crtc->dec))
dpu_dec_put(dpu_crtc->dec);
if (!IS_ERR_OR_NULL(dpu_crtc->ed))
dpu_ed_put(dpu_crtc->ed);
if (!IS_ERR_OR_NULL(dpu_crtc->fg))
dpu_fg_put(dpu_crtc->fg);
if (!IS_ERR_OR_NULL(dpu_crtc->tcon))
dpu_tcon_put(dpu_crtc->tcon);
}
static int dpu_crtc_get_resources(struct dpu_crtc *dpu_crtc)
{
struct dpu_soc *dpu = dev_get_drvdata(dpu_crtc->dev->parent);
unsigned int stream_id = dpu_crtc->stream_id;
int ret;
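	/*
	 * ConstFrame4/5 and ExtDst4/5 back the safety stream of display
	 * stream 0/1, hence the "stream_id + 4" unit indices below;
	 * ConstFrame0/1 and ExtDst0/1 carry the content stream.
	 */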
dpu_crtc->cf = dpu_cf_get(dpu, stream_id + 4);
if (IS_ERR(dpu_crtc->cf)) {
ret = PTR_ERR(dpu_crtc->cf);
goto err_out;
}
dpu_crtc->aux_cf = dpu_aux_cf_peek(dpu_crtc->cf);
dpu_crtc->dec = dpu_dec_get(dpu, stream_id);
if (IS_ERR(dpu_crtc->dec)) {
ret = PTR_ERR(dpu_crtc->dec);
goto err_out;
}
dpu_crtc->aux_dec = dpu_aux_dec_peek(dpu_crtc->dec);
dpu_crtc->ed = dpu_ed_get(dpu, stream_id + 4);
if (IS_ERR(dpu_crtc->ed)) {
ret = PTR_ERR(dpu_crtc->ed);
goto err_out;
}
dpu_crtc->aux_ed = dpu_aux_ed_peek(dpu_crtc->ed);
dpu_crtc->fg = dpu_fg_get(dpu, stream_id);
if (IS_ERR(dpu_crtc->fg)) {
ret = PTR_ERR(dpu_crtc->fg);
goto err_out;
}
dpu_crtc->aux_fg = dpu_aux_fg_peek(dpu_crtc->fg);
dpu_crtc->tcon = dpu_tcon_get(dpu, stream_id);
if (IS_ERR(dpu_crtc->tcon)) {
ret = PTR_ERR(dpu_crtc->tcon);
goto err_out;
}
dpu_crtc->aux_tcon = dpu_aux_tcon_peek(dpu_crtc->tcon);
if (dpu_crtc->aux_is_master) {
dpu_crtc->m_cf = dpu_crtc->aux_cf;
dpu_crtc->m_dec = dpu_crtc->aux_dec;
dpu_crtc->m_ed = dpu_crtc->aux_ed;
dpu_crtc->m_fg = dpu_crtc->aux_fg;
dpu_crtc->m_tcon = dpu_crtc->aux_tcon;
dpu_crtc->s_cf = dpu_crtc->cf;
dpu_crtc->s_dec = dpu_crtc->dec;
dpu_crtc->s_ed = dpu_crtc->ed;
dpu_crtc->s_fg = dpu_crtc->fg;
dpu_crtc->s_tcon = dpu_crtc->tcon;
} else {
dpu_crtc->m_cf = dpu_crtc->cf;
dpu_crtc->m_dec = dpu_crtc->dec;
dpu_crtc->m_ed = dpu_crtc->ed;
dpu_crtc->m_fg = dpu_crtc->fg;
dpu_crtc->m_tcon = dpu_crtc->tcon;
dpu_crtc->s_cf = dpu_crtc->aux_cf;
dpu_crtc->s_dec = dpu_crtc->aux_dec;
dpu_crtc->s_ed = dpu_crtc->aux_ed;
dpu_crtc->s_fg = dpu_crtc->aux_fg;
dpu_crtc->s_tcon = dpu_crtc->aux_tcon;
}
return 0;
err_out:
dpu_crtc_put_resources(dpu_crtc);
return ret;
}
static int dpu_crtc_init(struct dpu_crtc *dpu_crtc,
struct dpu_client_platformdata *pdata, struct drm_device *drm)
{
struct dpu_soc *dpu = dev_get_drvdata(dpu_crtc->dev->parent);
struct device *dev = dpu_crtc->dev;
struct drm_crtc *crtc = &dpu_crtc->base;
struct dpu_plane_grp *plane_grp = pdata->plane_grp;
unsigned int stream_id = pdata->stream_id;
int i, ret;
init_completion(&dpu_crtc->safety_shdld_done);
init_completion(&dpu_crtc->content_shdld_done);
init_completion(&dpu_crtc->dec_shdld_done);
dpu_crtc->stream_id = stream_id;
dpu_crtc->crtc_grp_id = pdata->di_grp_id;
dpu_crtc->hw_plane_num = plane_grp->hw_plane_num;
dpu_crtc->has_pc = dpu_has_pc(dpu);
dpu_crtc->syncmode_min_prate = dpu_get_syncmode_min_prate(dpu);
dpu_crtc->singlemode_max_width = dpu_get_singlemode_max_width(dpu);
dpu_crtc->master_stream_id = dpu_get_master_stream_id(dpu);
	dpu_crtc->aux_is_master = dpu_crtc->has_pc ?
			dpu_crtc->master_stream_id != stream_id : false;
dpu_crtc->st = pdata->st9;
dpu_crtc->plane = devm_kcalloc(dev, dpu_crtc->hw_plane_num,
sizeof(*dpu_crtc->plane), GFP_KERNEL);
if (!dpu_crtc->plane)
return -ENOMEM;
ret = dpu_crtc_get_resources(dpu_crtc);
if (ret) {
dev_err(dev, "getting resources failed with %d.\n", ret);
return ret;
}
plane_grp->res.fg[stream_id] = dpu_crtc->fg;
dpu_crtc->plane[0] = dpu_plane_init(drm, 0, stream_id, plane_grp,
DRM_PLANE_TYPE_PRIMARY);
if (IS_ERR(dpu_crtc->plane[0])) {
ret = PTR_ERR(dpu_crtc->plane[0]);
dev_err(dev, "initializing plane0 failed with %d.\n", ret);
goto err_put_resources;
}
crtc->port = pdata->of_node;
drm_crtc_helper_add(crtc, &dpu_helper_funcs);
ret = drm_crtc_init_with_planes(drm, crtc, &dpu_crtc->plane[0]->base, NULL,
&dpu_crtc_funcs, NULL);
if (ret) {
dev_err(dev, "adding crtc failed with %d.\n", ret);
goto err_put_resources;
}
for (i = 1; i < dpu_crtc->hw_plane_num; i++) {
dpu_crtc->plane[i] = dpu_plane_init(drm,
drm_crtc_mask(&dpu_crtc->base),
stream_id, plane_grp,
DRM_PLANE_TYPE_OVERLAY);
if (IS_ERR(dpu_crtc->plane[i])) {
ret = PTR_ERR(dpu_crtc->plane[i]);
dev_err(dev, "initializing plane%d failed with %d.\n",
i, ret);
goto err_put_resources;
}
}
dpu_crtc->vbl_irq = dpu_map_inner_irq(dpu, stream_id ?
IRQ_DISENGCFG_FRAMECOMPLETE1 :
IRQ_DISENGCFG_FRAMECOMPLETE0);
irq_set_status_flags(dpu_crtc->vbl_irq, IRQ_DISABLE_UNLAZY);
ret = devm_request_irq(dev, dpu_crtc->vbl_irq, dpu_vbl_irq_handler, 0,
"imx_drm", dpu_crtc);
if (ret < 0) {
dev_err(dev, "vblank irq request failed with %d.\n", ret);
goto err_put_resources;
}
disable_irq(dpu_crtc->vbl_irq);
dpu_crtc->safety_shdld_irq = dpu_map_inner_irq(dpu, stream_id ?
IRQ_EXTDST5_SHDLOAD : IRQ_EXTDST4_SHDLOAD);
irq_set_status_flags(dpu_crtc->safety_shdld_irq, IRQ_DISABLE_UNLAZY);
ret = devm_request_irq(dev, dpu_crtc->safety_shdld_irq,
dpu_safety_shdld_irq_handler, 0, "imx_drm",
dpu_crtc);
if (ret < 0) {
dev_err(dev,
"safety shadow load irq request failed with %d.\n",
ret);
goto err_put_resources;
}
disable_irq(dpu_crtc->safety_shdld_irq);
dpu_crtc->content_shdld_irq = dpu_map_inner_irq(dpu, stream_id ?
IRQ_EXTDST1_SHDLOAD : IRQ_EXTDST0_SHDLOAD);
irq_set_status_flags(dpu_crtc->content_shdld_irq, IRQ_DISABLE_UNLAZY);
ret = devm_request_irq(dev, dpu_crtc->content_shdld_irq,
dpu_content_shdld_irq_handler, 0, "imx_drm",
dpu_crtc);
if (ret < 0) {
dev_err(dev,
"content shadow load irq request failed with %d.\n",
ret);
goto err_put_resources;
}
disable_irq(dpu_crtc->content_shdld_irq);
dpu_crtc->dec_shdld_irq = dpu_map_inner_irq(dpu, stream_id ?
IRQ_DISENGCFG_SHDLOAD1 : IRQ_DISENGCFG_SHDLOAD0);
irq_set_status_flags(dpu_crtc->dec_shdld_irq, IRQ_DISABLE_UNLAZY);
ret = devm_request_irq(dev, dpu_crtc->dec_shdld_irq,
dpu_dec_shdld_irq_handler, 0, "imx_drm",
dpu_crtc);
if (ret < 0) {
dev_err(dev,
"DEC shadow load irq request failed with %d.\n",
ret);
goto err_put_resources;
}
disable_irq(dpu_crtc->dec_shdld_irq);
return 0;
err_put_resources:
dpu_crtc_put_resources(dpu_crtc);
return ret;
}
static int dpu_crtc_bind(struct device *dev, struct device *master, void *data)
{
struct dpu_client_platformdata *pdata = dev->platform_data;
struct drm_device *drm = data;
struct dpu_crtc *dpu_crtc;
int ret;
dpu_crtc = devm_kzalloc(dev, sizeof(*dpu_crtc), GFP_KERNEL);
if (!dpu_crtc)
return -ENOMEM;
dpu_crtc->dev = dev;
ret = dpu_crtc_init(dpu_crtc, pdata, drm);
if (ret)
return ret;
if (!drm->mode_config.funcs)
drm->mode_config.funcs = &dpu_drm_mode_config_funcs;
dev_set_drvdata(dev, dpu_crtc);
return 0;
}
static void dpu_crtc_unbind(struct device *dev, struct device *master,
void *data)
{
struct dpu_crtc *dpu_crtc = dev_get_drvdata(dev);
dpu_crtc_put_resources(dpu_crtc);
	/* make sure the CRTC exists, and then clean up */
if (dpu_crtc->base.dev)
drm_crtc_cleanup(&dpu_crtc->base);
}
static const struct component_ops dpu_crtc_ops = {
.bind = dpu_crtc_bind,
.unbind = dpu_crtc_unbind,
};
static int dpu_crtc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
if (!dev->platform_data)
return -EINVAL;
return component_add(dev, &dpu_crtc_ops);
}
static int dpu_crtc_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &dpu_crtc_ops);
return 0;
}
static struct platform_driver dpu_crtc_driver = {
.driver = {
.name = "imx-dpu-crtc",
},
.probe = dpu_crtc_probe,
.remove = dpu_crtc_remove,
};
module_platform_driver(dpu_crtc_driver);
MODULE_AUTHOR("NXP Semiconductor");
MODULE_DESCRIPTION("i.MX DPU CRTC");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx-dpu-crtc");
drivers/gpu/drm/imx/dpu/dpu-crtc.h
/*
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#ifndef _DPU_CRTC_H_
#define _DPU_CRTC_H_
#include <video/dpu.h>
#include "dpu-plane.h"
#include "imx-drm.h"
struct dpu_crtc {
struct device *dev;
struct drm_crtc base;
struct imx_drm_crtc *imx_crtc;
struct dpu_constframe *cf;
struct dpu_disengcfg *dec;
struct dpu_extdst *ed;
struct dpu_framegen *fg;
struct dpu_tcon *tcon;
struct dpu_store *st;
struct dpu_constframe *aux_cf;
struct dpu_disengcfg *aux_dec;
struct dpu_extdst *aux_ed;
struct dpu_framegen *aux_fg;
struct dpu_tcon *aux_tcon;
/* master */
struct dpu_constframe *m_cf;
struct dpu_disengcfg *m_dec;
struct dpu_extdst *m_ed;
struct dpu_framegen *m_fg;
struct dpu_tcon *m_tcon;
/* slave */
struct dpu_constframe *s_cf;
struct dpu_disengcfg *s_dec;
struct dpu_extdst *s_ed;
struct dpu_framegen *s_fg;
struct dpu_tcon *s_tcon;
struct dpu_plane **plane;
unsigned int hw_plane_num;
unsigned int stream_id;
unsigned int crtc_grp_id;
unsigned int syncmode_min_prate;
unsigned int singlemode_max_width;
unsigned int master_stream_id;
int vbl_irq;
int safety_shdld_irq;
int content_shdld_irq;
int dec_shdld_irq;
bool has_pc;
bool aux_is_master;
struct completion safety_shdld_done;
struct completion content_shdld_done;
struct completion dec_shdld_done;
};
struct dpu_crtc_state {
struct imx_crtc_state imx_crtc_state;
struct dpu_plane_state **dpu_plane_states;
bool use_pc;
};
static inline struct dpu_crtc_state *to_dpu_crtc_state(struct imx_crtc_state *s)
{
return container_of(s, struct dpu_crtc_state, imx_crtc_state);
}
static inline struct dpu_crtc *to_dpu_crtc(struct drm_crtc *crtc)
{
return container_of(crtc, struct dpu_crtc, base);
}
struct dpu_plane_state **
crtc_state_get_dpu_plane_states(struct drm_crtc_state *state);
#endif
drivers/gpu/drm/imx/dpu/dpu-kms.c
/*
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <linux/dma-buf.h>
#include <linux/reservation.h>
#include <linux/sort.h>
#include <video/dpu.h>
#include "dpu-crtc.h"
#include "dpu-plane.h"
#include "imx-drm.h"
static void dpu_drm_output_poll_changed(struct drm_device *dev)
{
struct imx_drm_device *imxdrm = dev->dev_private;
drm_fbdev_cma_hotplug_event(imxdrm->fbhelper);
}
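/*
 * Allocate a scratch array big enough to hold one plane state pointer
 * per plane in the device; it is used to sort a CRTC's planes by zpos.
 */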
static struct drm_plane_state **
dpu_atomic_alloc_tmp_planes_per_crtc(struct drm_device *dev)
{
int total_planes = dev->mode_config.num_total_plane;
struct drm_plane_state **states;
states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL);
if (!states)
return ERR_PTR(-ENOMEM);
return states;
}
static int zpos_cmp(const void *a, const void *b)
{
const struct drm_plane_state *sa = *(struct drm_plane_state **)a;
const struct drm_plane_state *sb = *(struct drm_plane_state **)b;
return sa->normalized_zpos - sb->normalized_zpos;
}
static int dpu_atomic_sort_planes_per_crtc(struct drm_crtc_state *crtc_state,
struct drm_plane_state **states)
{
struct drm_atomic_state *state = crtc_state->state;
struct drm_device *dev = state->dev;
struct drm_plane *plane;
int n = 0;
drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
struct drm_plane_state *plane_state =
drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
states[n++] = plane_state;
}
sort(states, n, sizeof(*states), zpos_cmp, NULL);
return n;
}
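/*
 * Compute the bounding box ("base") of all active planes on the CRTC,
 * store it in the bottom plane's state and make each plane's layer
 * offset relative to it.  With the pixel combiner in use, the base is
 * additionally split into left and right halves.
 */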
static int
dpu_atomic_compute_plane_base_per_crtc(struct drm_crtc_state *crtc_state,
struct drm_plane_state **states, int n,
bool use_pc)
{
struct dpu_plane_state *dpstate;
int i, left, right, top, bottom, tmp;
int base_x, base_y, base_w, base_h;
int half_hdisplay = crtc_state->adjusted_mode.hdisplay >> 1;
bool lo, ro, bo;
/* compute the plane base */
left = states[0]->crtc_x;
top = states[0]->crtc_y;
right = states[0]->crtc_x + states[0]->crtc_w;
bottom = states[0]->crtc_y + states[0]->crtc_h;
for (i = 1; i < n; i++) {
left = min(states[i]->crtc_x, left);
top = min(states[i]->crtc_y, top);
tmp = states[i]->crtc_x + states[i]->crtc_w;
right = max(tmp, right);
tmp = states[i]->crtc_y + states[i]->crtc_h;
bottom = max(tmp, bottom);
}
	/* also compute each layer's offset relative to the base */
for (i = 0; i < n; i++) {
dpstate = to_dpu_plane_state(states[i]);
dpstate->layer_x = states[i]->crtc_x - left;
dpstate->layer_y = states[i]->crtc_y - top;
}
/* store the base in plane state */
dpstate = to_dpu_plane_state(states[0]);
base_x = left;
base_y = top;
base_w = right - left;
base_h = bottom - top;
dpstate->base_x = base_x;
dpstate->base_y = base_y;
dpstate->base_w = base_w;
dpstate->base_h = base_h;
if (!use_pc)
return 0;
	/* compute left/right layer_x and base_x/w when the pixel combiner is needed */
for (i = 0; i < n; i++) {
dpstate = to_dpu_plane_state(states[i]);
lo = dpstate->left_src_w && !dpstate->right_src_w;
ro = !dpstate->left_src_w && dpstate->right_src_w;
bo = dpstate->left_src_w && dpstate->right_src_w;
if (lo || bo) {
dpstate->left_layer_x = dpstate->layer_x;
dpstate->right_layer_x = 0;
} else if (ro) {
dpstate->left_layer_x = 0;
dpstate->right_layer_x =
states[i]->crtc_x - half_hdisplay;
}
if (i)
continue;
if (base_x < half_hdisplay) {
dpstate->left_base_x = base_x;
dpstate->right_base_x = 0;
if ((base_x + base_w) < half_hdisplay) {
dpstate->left_base_w = base_w;
dpstate->right_base_w = 0;
} else {
dpstate->left_base_w = half_hdisplay - base_x;
dpstate->right_base_w =
base_x + base_w - half_hdisplay;
}
} else {
dpstate->left_base_x = 0;
dpstate->right_base_x = base_x - half_hdisplay;
dpstate->left_base_w = 0;
dpstate->right_base_w = base_w;
}
}
return 0;
}
static void
dpu_atomic_set_top_plane_per_crtc(struct drm_plane_state **states, int n,
bool use_pc)
{
struct dpu_plane_state *dpstate;
bool found_l_top = false, found_r_top = false;
int i;
for (i = n - 1; i >= 0; i--) {
dpstate = to_dpu_plane_state(states[i]);
if (use_pc) {
if (dpstate->left_src_w && !found_l_top) {
dpstate->is_left_top = true;
found_l_top = true;
} else {
dpstate->is_left_top = false;
}
if (dpstate->right_src_w && !found_r_top) {
dpstate->is_right_top = true;
found_r_top = true;
} else {
dpstate->is_right_top = false;
}
} else {
			dpstate->is_top = (i == n - 1);
}
}
}
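/*
 * Walk the active planes from bottom to top and assign each one a free
 * fetch unit (plus a fetcheco/hscaler/vscaler where the format or the
 * scaling requires it) together with a LayerBlend stage; a plane which
 * spans both halves of a pixel-combined display additionally gets an
 * auxiliary source on the other stream.
 */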
static int
dpu_atomic_assign_plane_source_per_crtc(struct drm_plane_state **states,
int n, bool use_pc)
{
struct dpu_plane_state *dpstate;
struct dpu_plane *dplane;
struct dpu_plane_grp *grp;
struct drm_framebuffer *fb;
struct dpu_fetchunit *fu;
struct dpu_fetchunit *fe;
struct dpu_hscaler *hs;
struct dpu_vscaler *vs;
lb_prim_sel_t stage;
dpu_block_id_t blend;
unsigned int sid, src_sid;
unsigned int num_planes;
int i, j, k, l, m;
int total_asrc_num;
int s0_layer_cnt = 0, s1_layer_cnt = 0;
int s0_n = 0, s1_n = 0;
u32 src_a_mask, cap_mask, fe_mask, hs_mask, vs_mask;
bool need_fetcheco, need_hscaler, need_vscaler;
bool fmt_is_yuv;
bool alloc_aux_source;
if (use_pc) {
for (i = 0; i < n; i++) {
dpstate = to_dpu_plane_state(states[i]);
if (dpstate->left_src_w)
s0_n++;
if (dpstate->right_src_w)
s1_n++;
}
} else {
s0_n = n;
s1_n = n;
}
/* for active planes only */
for (i = 0; i < n; i++) {
dpstate = to_dpu_plane_state(states[i]);
dplane = to_dpu_plane(states[i]->plane);
fb = states[i]->fb;
num_planes = drm_format_num_planes(fb->format->format);
fmt_is_yuv = drm_format_is_yuv(fb->format->format);
grp = dplane->grp;
alloc_aux_source = false;
if (use_pc)
sid = dpstate->left_src_w ? 0 : 1;
else
sid = dplane->stream_id;
again:
if (alloc_aux_source)
sid ^= 1;
need_fetcheco = (num_planes > 1);
need_hscaler = (states[i]->src_w >> 16 != states[i]->crtc_w);
need_vscaler = (states[i]->src_h >> 16 != states[i]->crtc_h);
total_asrc_num = 0;
src_a_mask = grp->src_a_mask;
fe_mask = 0;
hs_mask = 0;
vs_mask = 0;
for (l = 0; l < (sizeof(grp->src_a_mask) * 8); l++) {
if (grp->src_a_mask & BIT(l))
total_asrc_num++;
}
/* assign source */
mutex_lock(&grp->mutex);
for (k = 0; k < total_asrc_num; k++) {
m = ffs(src_a_mask) - 1;
fu = source_to_fu(&grp->res, sources[m]);
		if (!fu) {
			mutex_unlock(&grp->mutex);
			return -EINVAL;
		}
/* avoid on-the-fly/hot migration */
src_sid = fu->ops->get_stream_id(fu);
if (src_sid && src_sid != BIT(sid))
goto next;
if (fetchunit_is_fetchdecode(fu)) {
cap_mask = fetchdecode_get_vproc_mask(fu);
if (need_fetcheco) {
fe = fetchdecode_get_fetcheco(fu);
/* avoid on-the-fly/hot migration */
				src_sid = fe->ops->get_stream_id(fe);
if (src_sid && src_sid != BIT(sid))
goto next;
/* fetch unit has the fetcheco cap? */
if (!dpu_vproc_has_fetcheco_cap(cap_mask))
goto next;
fe_mask =
dpu_vproc_get_fetcheco_cap(cap_mask);
/* fetcheco available? */
if (grp->src_use_vproc_mask & fe_mask)
goto next;
}
if (need_hscaler) {
hs = fetchdecode_get_hscaler(fu);
/* avoid on-the-fly/hot migration */
src_sid = hscaler_get_stream_id(hs);
if (src_sid && src_sid != BIT(sid))
goto next;
/* fetch unit has the hscale cap */
if (!dpu_vproc_has_hscale_cap(cap_mask))
goto next;
hs_mask =
dpu_vproc_get_hscale_cap(cap_mask);
/* hscaler available? */
if (grp->src_use_vproc_mask & hs_mask)
goto next;
}
if (need_vscaler) {
vs = fetchdecode_get_vscaler(fu);
/* avoid on-the-fly/hot migration */
src_sid = vscaler_get_stream_id(vs);
if (src_sid && src_sid != BIT(sid))
goto next;
/* fetch unit has the vscale cap? */
if (!dpu_vproc_has_vscale_cap(cap_mask))
goto next;
vs_mask =
dpu_vproc_get_vscale_cap(cap_mask);
/* vscaler available? */
if (grp->src_use_vproc_mask & vs_mask)
goto next;
}
} else {
if (fmt_is_yuv || need_fetcheco ||
need_hscaler || need_vscaler)
goto next;
}
grp->src_a_mask &= ~BIT(m);
grp->src_use_vproc_mask |= fe_mask | hs_mask | vs_mask;
break;
next:
src_a_mask &= ~BIT(m);
fe_mask = 0;
hs_mask = 0;
vs_mask = 0;
}
mutex_unlock(&grp->mutex);
if (k == total_asrc_num)
return -EINVAL;
if (alloc_aux_source)
dpstate->aux_source = sources[m];
else
dpstate->source = sources[m];
/* assign stage and blend */
if (sid) {
j = grp->hw_plane_num - (s1_n - s1_layer_cnt);
stage = s1_layer_cnt ? stages[j - 1] : cf_stages[sid];
blend = blends[j];
s1_layer_cnt++;
} else {
stage = s0_layer_cnt ?
stages[s0_layer_cnt - 1] : cf_stages[sid];
blend = blends[s0_layer_cnt];
s0_layer_cnt++;
}
if (alloc_aux_source) {
dpstate->aux_stage = stage;
dpstate->aux_blend = blend;
} else {
dpstate->stage = stage;
dpstate->blend = blend;
}
if (dpstate->need_aux_source && !alloc_aux_source) {
alloc_aux_source = true;
goto again;
}
}
return 0;
}
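/*
 * If a CRTC is not part of this atomic state and none of the plane
 * states in it may belong to that CRTC, its freshly duplicated pipe
 * states carry no change and may be dropped from the commit again;
 * mark such CRTCs here.
 */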
static void
dpu_atomic_mark_pipe_states_prone_to_put_per_crtc(struct drm_crtc *crtc,
u32 crtc_mask,
struct drm_atomic_state *state,
bool *puts)
{
struct drm_plane *plane;
struct drm_plane_state *plane_state;
bool found_pstate = false;
int i;
if ((crtc_mask & drm_crtc_mask(crtc)) == 0) {
for_each_new_plane_in_state(state, plane, plane_state, i) {
if (plane->possible_crtcs &
drm_crtc_mask(crtc)) {
found_pstate = true;
break;
}
}
if (!found_pstate)
puts[drm_crtc_index(crtc)] = true;
}
}
static void
dpu_atomic_put_plane_state(struct drm_atomic_state *state,
struct drm_plane *plane)
{
int index = drm_plane_index(plane);
plane->funcs->atomic_destroy_state(plane, state->planes[index].state);
state->planes[index].ptr = NULL;
state->planes[index].state = NULL;
drm_modeset_unlock(&plane->mutex);
}
static void
dpu_atomic_put_crtc_state(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
int index = drm_crtc_index(crtc);
crtc->funcs->atomic_destroy_state(crtc, state->crtcs[index].state);
state->crtcs[index].ptr = NULL;
state->crtcs[index].state = NULL;
drm_modeset_unlock(&crtc->mutex);
}
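/*
 * Drop the CRTC's and its planes' duplicated states from the commit if
 * none of the planes moves to different HW resources, so that an
 * unchanged pipe is left completely untouched.
 */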
static void
dpu_atomic_put_possible_states_per_crtc(struct drm_crtc_state *crtc_state)
{
struct drm_atomic_state *state = crtc_state->state;
struct drm_crtc *crtc = crtc_state->crtc;
struct drm_crtc_state *old_crtc_state = crtc->state;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
struct dpu_plane *dplane = to_dpu_plane(crtc->primary);
struct dpu_plane_state **old_dpstates;
struct dpu_plane_state *old_dpstate, *new_dpstate;
u32 active_mask = 0;
int i;
old_dpstates = crtc_state_get_dpu_plane_states(old_crtc_state);
if (WARN_ON(!old_dpstates))
return;
for (i = 0; i < dplane->grp->hw_plane_num; i++) {
old_dpstate = old_dpstates[i];
if (!old_dpstate)
continue;
active_mask |= BIT(i);
drm_atomic_crtc_state_for_each_plane(plane, crtc_state) {
if (drm_plane_index(plane) !=
drm_plane_index(old_dpstate->base.plane))
continue;
plane_state =
drm_atomic_get_existing_plane_state(state,
plane);
WARN_ON(!plane_state);
new_dpstate = to_dpu_plane_state(plane_state);
active_mask &= ~BIT(i);
			/*
			 * It should be enough to check only the real HW
			 * plane resources below; vproc resources and things
			 * like layer_x/y should be fine.
			 */
if (old_dpstate->stage != new_dpstate->stage ||
old_dpstate->source != new_dpstate->source ||
old_dpstate->blend != new_dpstate->blend ||
old_dpstate->aux_stage != new_dpstate->aux_stage ||
old_dpstate->aux_source != new_dpstate->aux_source ||
old_dpstate->aux_blend != new_dpstate->aux_blend)
return;
}
}
/* pure software check */
if (WARN_ON(active_mask))
return;
drm_atomic_crtc_state_for_each_plane(plane, crtc_state)
dpu_atomic_put_plane_state(state, plane);
dpu_atomic_put_crtc_state(state, crtc);
}
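/*
 * Main atomic check: decide per CRTC whether the pixel combiner must be
 * used, verify that enough fetch/fetcheco/hscaler/vscaler resources are
 * available for all active planes, then sort each CRTC's planes by zpos
 * and assign the actual HW resources to them.
 */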
static int dpu_drm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_plane *plane;
struct dpu_plane *dpu_plane;
struct drm_plane_state *plane_state;
struct dpu_plane_state *dpstate;
struct drm_framebuffer *fb;
struct dpu_plane_grp *grp[MAX_DPU_PLANE_GRP];
int ret, i, grp_id;
int active_plane[MAX_DPU_PLANE_GRP];
int active_plane_fetcheco[MAX_DPU_PLANE_GRP];
int active_plane_hscale[MAX_DPU_PLANE_GRP];
int active_plane_vscale[MAX_DPU_PLANE_GRP];
int half_hdisplay = 0;
bool pipe_states_prone_to_put[MAX_CRTC];
bool use_pc[MAX_DPU_PLANE_GRP];
u32 crtc_mask_in_state = 0;
ret = drm_atomic_helper_check_modeset(dev, state);
if (ret)
return ret;
for (i = 0; i < MAX_CRTC; i++)
pipe_states_prone_to_put[i] = false;
for (i = 0; i < MAX_DPU_PLANE_GRP; i++) {
active_plane[i] = 0;
active_plane_fetcheco[i] = 0;
active_plane_hscale[i] = 0;
active_plane_vscale[i] = 0;
use_pc[i] = false;
grp[i] = NULL;
}
for_each_new_crtc_in_state(state, crtc, crtc_state, i)
crtc_mask_in_state |= drm_crtc_mask(crtc);
drm_for_each_crtc(crtc, dev) {
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct imx_crtc_state *imx_crtc_state;
struct dpu_crtc_state *dcstate;
bool need_left, need_right, need_aux_source, use_pc_per_crtc;
use_pc_per_crtc = false;
dpu_atomic_mark_pipe_states_prone_to_put_per_crtc(crtc,
crtc_mask_in_state, state,
pipe_states_prone_to_put);
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
imx_crtc_state = to_imx_crtc_state(crtc_state);
dcstate = to_dpu_crtc_state(imx_crtc_state);
if (crtc_state->enable) {
if (use_pc[dpu_crtc->crtc_grp_id])
return -EINVAL;
if (crtc_state->adjusted_mode.clock >
dpu_crtc->syncmode_min_prate ||
crtc_state->adjusted_mode.hdisplay >
dpu_crtc->singlemode_max_width) {
if (!dpu_crtc->has_pc)
return -EINVAL;
use_pc_per_crtc = true;
}
}
if (use_pc_per_crtc) {
use_pc[dpu_crtc->crtc_grp_id] = true;
half_hdisplay = crtc_state->adjusted_mode.hdisplay >> 1;
}
dcstate->use_pc = use_pc_per_crtc;
drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
plane_state = drm_atomic_get_plane_state(state, plane);
dpstate = to_dpu_plane_state(plane_state);
fb = plane_state->fb;
dpu_plane = to_dpu_plane(plane);
grp_id = dpu_plane->grp->id;
active_plane[grp_id]++;
need_left = false;
need_right = false;
need_aux_source = false;
if (use_pc_per_crtc) {
if (plane_state->crtc_x < half_hdisplay)
need_left = true;
if ((plane_state->crtc_w +
plane_state->crtc_x) > half_hdisplay)
need_right = true;
if (need_left && need_right) {
need_aux_source = true;
active_plane[grp_id]++;
}
}
if (need_left && need_right) {
dpstate->left_crtc_w = half_hdisplay;
dpstate->left_crtc_w -= plane_state->crtc_x;
dpstate->left_src_w = dpstate->left_crtc_w;
} else if (need_left) {
dpstate->left_crtc_w = plane_state->crtc_w;
dpstate->left_src_w = plane_state->src_w >> 16;
} else {
dpstate->left_crtc_w = 0;
dpstate->left_src_w = 0;
}
if (need_right && need_left) {
dpstate->right_crtc_w = plane_state->crtc_x +
plane_state->crtc_w;
dpstate->right_crtc_w -= half_hdisplay;
dpstate->right_src_w = dpstate->right_crtc_w;
} else if (need_right) {
dpstate->right_crtc_w = plane_state->crtc_w;
dpstate->right_src_w = plane_state->src_w >> 16;
} else {
dpstate->right_crtc_w = 0;
dpstate->right_src_w = 0;
}
if (drm_format_num_planes(fb->format->format) > 1) {
active_plane_fetcheco[grp_id]++;
if (need_aux_source)
active_plane_fetcheco[grp_id]++;
}
if (plane_state->src_w >> 16 != plane_state->crtc_w) {
if (use_pc_per_crtc)
return -EINVAL;
active_plane_hscale[grp_id]++;
}
if (plane_state->src_h >> 16 != plane_state->crtc_h) {
if (use_pc_per_crtc)
return -EINVAL;
active_plane_vscale[grp_id]++;
}
if (grp[grp_id] == NULL)
grp[grp_id] = dpu_plane->grp;
dpstate->need_aux_source = need_aux_source;
}
}
/* enough resources? */
for (i = 0; i < MAX_DPU_PLANE_GRP; i++) {
if (grp[i]) {
if (active_plane[i] > grp[i]->hw_plane_num)
return -EINVAL;
if (active_plane_fetcheco[i] >
grp[i]->hw_plane_fetcheco_num)
return -EINVAL;
if (active_plane_hscale[i] >
grp[i]->hw_plane_hscaler_num)
return -EINVAL;
if (active_plane_vscale[i] >
grp[i]->hw_plane_vscaler_num)
return -EINVAL;
}
}
/* clear resource mask */
for (i = 0; i < MAX_DPU_PLANE_GRP; i++) {
if (grp[i]) {
mutex_lock(&grp[i]->mutex);
grp[i]->src_a_mask = ~grp[i]->src_na_mask;
grp[i]->src_use_vproc_mask = 0;
mutex_unlock(&grp[i]->mutex);
}
}
ret = drm_atomic_normalize_zpos(dev, state);
if (ret)
return ret;
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct drm_plane_state **states;
int n;
states = dpu_atomic_alloc_tmp_planes_per_crtc(dev);
if (IS_ERR(states))
return PTR_ERR(states);
n = dpu_atomic_sort_planes_per_crtc(crtc_state, states);
if (n < 0) {
kfree(states);
return n;
}
/* no active planes? */
if (n == 0) {
kfree(states);
continue;
}
/* 'zpos = 0' means primary plane */
if (states[0]->plane->type != DRM_PLANE_TYPE_PRIMARY) {
kfree(states);
return -EINVAL;
}
ret = dpu_atomic_compute_plane_base_per_crtc(crtc_state, states,
n, use_pc[dpu_crtc->crtc_grp_id]);
if (ret) {
kfree(states);
return ret;
}
dpu_atomic_set_top_plane_per_crtc(states, n,
use_pc[dpu_crtc->crtc_grp_id]);
ret = dpu_atomic_assign_plane_source_per_crtc(states, n,
use_pc[dpu_crtc->crtc_grp_id]);
if (ret) {
kfree(states);
return ret;
}
kfree(states);
if (pipe_states_prone_to_put[drm_crtc_index(crtc)])
dpu_atomic_put_possible_states_per_crtc(crtc_state);
}
	return drm_atomic_helper_check_planes(dev, state);
}
static void dpu_drm_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
drm_atomic_helper_wait_for_fences(dev, old_state, false);
drm_atomic_helper_wait_for_dependencies(old_state);
drm_atomic_helper_commit_tail(old_state);
drm_atomic_helper_commit_cleanup_done(old_state);
drm_atomic_state_put(old_state);
}
static void dpu_drm_commit_work(struct work_struct *work)
{
struct drm_atomic_state *state = container_of(work,
struct drm_atomic_state,
commit_work);
dpu_drm_commit_tail(state);
}
/*
* This is almost a copy of drm_atomic_helper_commit().
* For nonblock commits, we queue the work on a freezable and unbound work queue
* of our own instead of system_unbound_wq to make sure work items on the work
* queue are drained in the freeze phase of the system suspend operations.
*/
static int dpu_drm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool nonblock)
{
struct imx_drm_device *imxdrm = dev->dev_private;
int ret;
if (state->async_update) {
ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret)
return ret;
drm_atomic_helper_async_commit(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
return 0;
}
ret = drm_atomic_helper_setup_commit(state, nonblock);
if (ret)
return ret;
INIT_WORK(&state->commit_work, dpu_drm_commit_work);
ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret)
return ret;
if (!nonblock) {
ret = drm_atomic_helper_wait_for_fences(dev, state, true);
if (ret)
goto err;
}
ret = drm_atomic_helper_swap_state(state, true);
if (ret)
goto err;
drm_atomic_state_get(state);
if (nonblock)
queue_work(imxdrm->dpu_nonblock_commit_wq, &state->commit_work);
else
dpu_drm_commit_tail(state);
return 0;
err:
drm_atomic_helper_cleanup_planes(dev, state);
return ret;
}
const struct drm_mode_config_funcs dpu_drm_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.output_poll_changed = dpu_drm_output_poll_changed,
.atomic_check = dpu_drm_atomic_check,
.atomic_commit = dpu_drm_atomic_commit,
};
drivers/gpu/drm/imx/dpu/dpu-kms.h
/*
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#ifndef _DPU_KMS_H_
#define _DPU_KMS_H_
extern const struct drm_mode_config_funcs dpu_drm_mode_config_funcs;
#endif
drivers/gpu/drm/imx/dpu/dpu-plane.c
/*
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <video/dpu.h>
#include <video/imx8-prefetch.h>
#include "dpu-plane.h"
#include "imx-drm.h"
/*
* RGB and packed/2planar YUV formats
* are widely supported by many fetch units.
*/
static const uint32_t dpu_primary_formats[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_RGB565,
DRM_FORMAT_YUYV,
DRM_FORMAT_UYVY,
DRM_FORMAT_NV12,
DRM_FORMAT_NV21,
};
static const uint32_t dpu_overlay_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_RGB565,
DRM_FORMAT_YUYV,
DRM_FORMAT_UYVY,
DRM_FORMAT_NV12,
DRM_FORMAT_NV21,
};
static const uint64_t dpu_format_modifiers[] = {
DRM_FORMAT_MOD_VIVANTE_TILED,
DRM_FORMAT_MOD_VIVANTE_SUPER_TILED,
DRM_FORMAT_MOD_AMPHION_TILED,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID,
};
static void dpu_plane_destroy(struct drm_plane *plane)
{
struct dpu_plane *dpu_plane = to_dpu_plane(plane);
drm_plane_cleanup(plane);
kfree(dpu_plane);
}
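/*
 * Reset the plane to its default state: primary planes start at
 * zpos 0, overlays at zpos 1, both with no rotation.
 */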
static void dpu_plane_reset(struct drm_plane *plane)
{
struct dpu_plane_state *state;
if (plane->state) {
__drm_atomic_helper_plane_destroy_state(plane->state);
kfree(to_dpu_plane_state(plane->state));
plane->state = NULL;
}
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return;
state->base.zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
plane->state = &state->base;
plane->state->plane = plane;
plane->state->rotation = DRM_MODE_ROTATE_0;
}
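/*
 * Duplicate the base plane state along with all DPU-specific state,
 * including blend unit routing and the pixel combiner (left/right)
 * bookkeeping.
 */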
static struct drm_plane_state *
dpu_drm_atomic_plane_duplicate_state(struct drm_plane *plane)
{
struct dpu_plane_state *state, *copy;
if (WARN_ON(!plane->state))
return NULL;
copy = kmalloc(sizeof(*copy), GFP_KERNEL);
if (!copy)
return NULL;
__drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
state = to_dpu_plane_state(plane->state);
copy->stage = state->stage;
copy->source = state->source;
copy->blend = state->blend;
copy->aux_stage = state->aux_stage;
copy->aux_source = state->aux_source;
copy->aux_blend = state->aux_blend;
copy->layer_x = state->layer_x;
copy->layer_y = state->layer_y;
copy->base_x = state->base_x;
copy->base_y = state->base_y;
copy->base_w = state->base_w;
copy->base_h = state->base_h;
copy->is_top = state->is_top;
copy->use_prefetch = state->use_prefetch;
copy->use_aux_prefetch = state->use_aux_prefetch;
copy->need_aux_source = state->need_aux_source;
copy->left_layer_x = state->left_layer_x;
copy->left_base_x = state->left_base_x;
copy->left_base_w = state->left_base_w;
copy->left_src_w = state->left_src_w;
copy->left_crtc_w = state->left_crtc_w;
copy->right_layer_x = state->right_layer_x;
copy->right_base_x = state->right_base_x;
copy->right_base_w = state->right_base_w;
copy->right_src_w = state->right_src_w;
copy->right_crtc_w = state->right_crtc_w;
copy->is_left_top = state->is_left_top;
copy->is_right_top = state->is_right_top;
return &copy->base;
}
static void dpu_drm_atomic_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
__drm_atomic_helper_plane_destroy_state(state);
kfree(to_dpu_plane_state(state));
}
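/*
 * Packed YUV framebuffers must be linear; RGB may additionally be
 * Vivante (super)tiled, and 2-planar YUV may be Amphion tiled.
 */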
static bool dpu_drm_plane_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
{
if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
return false;
switch (format) {
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
return modifier == DRM_FORMAT_MOD_LINEAR;
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_RGBA8888:
case DRM_FORMAT_RGBX8888:
case DRM_FORMAT_BGRA8888:
case DRM_FORMAT_BGRX8888:
case DRM_FORMAT_RGB565:
return modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == DRM_FORMAT_MOD_VIVANTE_TILED ||
modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
return modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == DRM_FORMAT_MOD_AMPHION_TILED;
default:
return false;
}
}
static const struct drm_plane_funcs dpu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = dpu_plane_destroy,
.reset = dpu_plane_reset,
.atomic_duplicate_state = dpu_drm_atomic_plane_duplicate_state,
.atomic_destroy_state = dpu_drm_atomic_plane_destroy_state,
.format_mod_supported = dpu_drm_plane_format_mod_supported,
};
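/*
 * Compute the DMA address of the first framebuffer plane.  For tile
 * modifiers the src x/y offset is applied via the fetchunit/DPRC, so
 * only fb->offsets[0] is added here.
 */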
static inline dma_addr_t
drm_plane_state_to_baseaddr(struct drm_plane_state *state, bool aux_source)
{
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *cma_obj;
struct dpu_plane_state *dpstate = to_dpu_plane_state(state);
unsigned int x = (state->src_x >> 16) +
(aux_source ? dpstate->left_src_w : 0);
unsigned int y = state->src_y >> 16;
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
BUG_ON(!cma_obj);
if (fb->modifier)
return cma_obj->paddr + fb->offsets[0];
if (fb->flags & DRM_MODE_FB_INTERLACED)
y /= 2;
return cma_obj->paddr + fb->offsets[0] + fb->pitches[0] * y +
drm_format_plane_cpp(fb->format->format, 0) * x;
}
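/*
 * Compute the DMA address of the chroma (UV) plane, accounting for
 * chroma subsampling and interlaced scanout.
 */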
static inline dma_addr_t
drm_plane_state_to_uvbaseaddr(struct drm_plane_state *state, bool aux_source)
{
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *cma_obj;
struct dpu_plane_state *dpstate = to_dpu_plane_state(state);
int x = (state->src_x >> 16) + (aux_source ? dpstate->left_src_w : 0);
int y = state->src_y >> 16;
cma_obj = drm_fb_cma_get_gem_obj(fb, 1);
BUG_ON(!cma_obj);
if (fb->modifier)
return cma_obj->paddr + fb->offsets[1];
x /= drm_format_horz_chroma_subsampling(fb->format->format);
y /= drm_format_vert_chroma_subsampling(fb->format->format);
if (fb->flags & DRM_MODE_FB_INTERLACED)
y /= 2;
return cma_obj->paddr + fb->offsets[1] + fb->pitches[1] * y +
drm_format_plane_cpp(fb->format->format, 1) * x;
}
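/*
 * Validate a plane update: scaling capability, chroma/tile/base
 * address alignment, and whether the prefetch engine (DPRC) can
 * fetch this framebuffer.  When the pixel combiner splits the plane,
 * the auxiliary (right-hand) source is checked in a second pass via
 * the "again" label.
 */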
static int dpu_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct dpu_plane *dplane = to_dpu_plane(plane);
struct dpu_plane_state *dpstate = to_dpu_plane_state(state);
struct dpu_plane_state *old_dpstate = to_dpu_plane_state(plane->state);
struct dpu_plane_res *res = &dplane->grp->res;
struct drm_crtc_state *crtc_state;
struct drm_framebuffer *fb = state->fb;
struct dpu_fetchunit *fu;
struct dprc *dprc;
dma_addr_t baseaddr, uv_baseaddr = 0;
u32 src_w = state->src_w >> 16, src_h = state->src_h >> 16,
src_x = state->src_x >> 16, src_y = state->src_y >> 16;
unsigned int frame_width;
int bpp;
bool fb_is_interlaced;
bool check_aux_source = false;
/* pure software check */
if (plane->type != DRM_PLANE_TYPE_PRIMARY)
if (WARN_ON(dpstate->base_x || dpstate->base_y ||
dpstate->base_w || dpstate->base_h))
return -EINVAL;
/* ok to disable */
if (!fb) {
dpstate->stage = LB_PRIM_SEL__DISABLE;
dpstate->source = LB_SEC_SEL__DISABLE;
dpstate->blend = ID_NONE;
dpstate->aux_stage = LB_PRIM_SEL__DISABLE;
dpstate->aux_source = LB_SEC_SEL__DISABLE;
dpstate->aux_blend = ID_NONE;
dpstate->layer_x = 0;
dpstate->layer_y = 0;
dpstate->base_x = 0;
dpstate->base_y = 0;
dpstate->base_w = 0;
dpstate->base_h = 0;
dpstate->is_top = false;
dpstate->use_prefetch = false;
dpstate->use_aux_prefetch = false;
dpstate->need_aux_source = false;
dpstate->left_layer_x = 0;
dpstate->left_base_x = 0;
dpstate->left_base_w = 0;
dpstate->left_src_w = 0;
dpstate->left_crtc_w = 0;
dpstate->right_layer_x = 0;
dpstate->right_base_x = 0;
dpstate->right_base_w = 0;
dpstate->right_src_w = 0;
dpstate->right_crtc_w = 0;
dpstate->is_left_top = false;
dpstate->is_right_top = false;
return 0;
}
if (!state->crtc)
return -EINVAL;
fb_is_interlaced = !!(fb->flags & DRM_MODE_FB_INTERLACED);
if (fb->modifier &&
fb->modifier != DRM_FORMAT_MOD_AMPHION_TILED &&
fb->modifier != DRM_FORMAT_MOD_VIVANTE_TILED &&
fb->modifier != DRM_FORMAT_MOD_VIVANTE_SUPER_TILED)
return -EINVAL;
if (dplane->grp->has_vproc) {
/* no down scaling */
if (src_w > state->crtc_w || src_h > state->crtc_h)
return -EINVAL;
} else {
/* no scaling */
if (src_w != state->crtc_w || src_h != state->crtc_h)
return -EINVAL;
}
/* no off screen */
if (state->crtc_x < 0 || state->crtc_y < 0)
return -EINVAL;
crtc_state =
drm_atomic_get_existing_crtc_state(state->state, state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
/* mode set is needed when base x/y is changed */
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
if ((dpstate->base_x != old_dpstate->base_x) ||
(dpstate->base_y != old_dpstate->base_y))
crtc_state->mode_changed = true;
if (state->crtc_x + state->crtc_w >
crtc_state->adjusted_mode.hdisplay)
return -EINVAL;
if (state->crtc_y + state->crtc_h >
crtc_state->adjusted_mode.vdisplay)
return -EINVAL;
/* pixel/line count and position parameters check */
if (drm_format_horz_chroma_subsampling(fb->format->format) == 2) {
if (dpstate->left_src_w || dpstate->right_src_w) {
if ((dpstate->left_src_w % 2) ||
(dpstate->right_src_w % 2) || (src_x % 2))
return -EINVAL;
} else {
if ((src_w % 2) || (src_x % 2))
return -EINVAL;
}
}
if (drm_format_vert_chroma_subsampling(fb->format->format) == 2) {
if (src_h % (fb_is_interlaced ? 4 : 2))
return -EINVAL;
if (src_y % (fb_is_interlaced ? 4 : 2))
return -EINVAL;
}
/* for tile formats, the framebuffer has to be tile-aligned */
switch (fb->modifier) {
case DRM_FORMAT_MOD_AMPHION_TILED:
if (fb->width % 8)
return -EINVAL;
if (fb->height % 256)
return -EINVAL;
break;
case DRM_FORMAT_MOD_VIVANTE_TILED:
if (fb->width % 4)
return -EINVAL;
if (fb->height % 4)
return -EINVAL;
break;
case DRM_FORMAT_MOD_VIVANTE_SUPER_TILED:
if (fb->width % 64)
return -EINVAL;
if (fb->height % 64)
return -EINVAL;
break;
default:
break;
}
again:
fu = source_to_fu(res,
check_aux_source ? dpstate->aux_source : dpstate->source);
if (!fu)
return -EINVAL;
dprc = fu->dprc;
if (dpstate->need_aux_source)
frame_width = check_aux_source ?
dpstate->right_src_w : dpstate->left_src_w;
else
frame_width = src_w;
if (dprc &&
dprc_format_supported(dprc, fb->format->format, fb->modifier) &&
dprc_stride_supported(dprc, fb->pitches[0], fb->pitches[1],
frame_width, fb->format->format)) {
if (check_aux_source)
dpstate->use_aux_prefetch = true;
else
dpstate->use_prefetch = true;
} else {
if (check_aux_source)
dpstate->use_aux_prefetch = false;
else
dpstate->use_prefetch = false;
}
if (fb->modifier) {
if (check_aux_source && !dpstate->use_aux_prefetch)
return -EINVAL;
else if (!check_aux_source && !dpstate->use_prefetch)
return -EINVAL;
}
/*
 * Base address alignment check.
 *
 * The (UV) base address offset introduced by the PRG x/y
 * offset (for tile formats) does not impact the alignment
 * check, so it is not taken into consideration here.
 */
baseaddr = drm_plane_state_to_baseaddr(state, check_aux_source);
switch (fb->format->format) {
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
bpp = 16;
break;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
bpp = 8;
break;
default:
bpp = drm_format_plane_cpp(fb->format->format, 0) * 8;
break;
}
switch (bpp) {
case 32:
if (baseaddr & 0x3)
return -EINVAL;
break;
case 16:
if (fb->modifier) {
if (baseaddr & 0x1)
return -EINVAL;
} else {
if (check_aux_source) {
if (baseaddr &
(dpstate->use_aux_prefetch ? 0x7 : 0x1))
return -EINVAL;
} else {
if (baseaddr &
(dpstate->use_prefetch ? 0x7 : 0x1))
return -EINVAL;
}
}
break;
}
if (fb->pitches[0] > 0x10000)
return -EINVAL;
/* UV base address alignment check, assuming 16bpp */
if (drm_format_num_planes(fb->format->format) > 1) {
uv_baseaddr = drm_plane_state_to_uvbaseaddr(state,
check_aux_source);
if (fb->modifier) {
if (uv_baseaddr & 0x1)
return -EINVAL;
} else {
if (check_aux_source) {
if (uv_baseaddr &
(dpstate->use_aux_prefetch ? 0x7 : 0x1))
return -EINVAL;
} else {
if (uv_baseaddr &
(dpstate->use_prefetch ? 0x7 : 0x1))
return -EINVAL;
}
}
if (fb->pitches[1] > 0x10000)
return -EINVAL;
}
if (!check_aux_source && dpstate->use_prefetch &&
!dprc_stride_double_check(dprc, frame_width, src_x,
fb->format->format,
fb->modifier,
baseaddr, uv_baseaddr)) {
if (fb->modifier)
return -EINVAL;
if (bpp == 16 && (baseaddr & 0x1))
return -EINVAL;
if (uv_baseaddr & 0x1)
return -EINVAL;
dpstate->use_prefetch = false;
} else if (check_aux_source && dpstate->use_aux_prefetch &&
!dprc_stride_double_check(dprc, frame_width, src_x,
fb->format->format,
fb->modifier,
baseaddr, uv_baseaddr)) {
if (fb->modifier)
return -EINVAL;
if (bpp == 16 && (baseaddr & 0x1))
return -EINVAL;
if (uv_baseaddr & 0x1)
return -EINVAL;
dpstate->use_aux_prefetch = false;
}
if (dpstate->need_aux_source && !check_aux_source) {
check_aux_source = true;
goto again;
}
return 0;
}
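/*
 * Program the hardware pipeline for this plane: fetchunit (plus
 * fetcheco for 2-planar YUV), optional vscaler then hscaler,
 * layerblend, and extdst source selection for the topmost plane.
 * With the pixel combiner in use, the auxiliary source is programmed
 * in a second pass via the "again" label.
 */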
static void dpu_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct dpu_plane *dplane = to_dpu_plane(plane);
struct drm_plane_state *state = plane->state;
struct dpu_plane_state *dpstate = to_dpu_plane_state(state);
struct drm_framebuffer *fb = state->fb;
struct dpu_plane_res *res = &dplane->grp->res;
struct dpu_fetchunit *fu;
struct dpu_fetchunit *fe = NULL;
struct dprc *dprc;
struct dpu_hscaler *hs = NULL;
struct dpu_vscaler *vs = NULL;
struct dpu_layerblend *lb;
struct dpu_constframe *cf, *aux_cf;
struct dpu_extdst *ed;
struct dpu_framegen *fg, *aux_fg;
struct device *dev = plane->dev->dev;
dma_addr_t baseaddr, uv_baseaddr = 0;
dpu_block_id_t blend, fe_id, vs_id = ID_NONE, hs_id;
lb_sec_sel_t source;
lb_prim_sel_t stage;
unsigned int stream_id;
unsigned int src_w, src_h, src_x, src_y;
unsigned int layer_x;
unsigned int mt_w = 0, mt_h = 0; /* w/h in a micro-tile */
int bpp, lb_id;
bool need_fetcheco, need_hscaler = false, need_vscaler = false;
bool prefetch_start, uv_prefetch_start;
bool crtc_use_pc = dpstate->left_src_w || dpstate->right_src_w;
bool update_aux_source = false;
bool use_prefetch;
bool need_modeset;
bool is_overlay = plane->type == DRM_PLANE_TYPE_OVERLAY;
bool fb_is_interlaced;
/*
* Do nothing since the plane is disabled by
* crtc_func->atomic_begin/flush.
*/
if (!fb)
return;
need_modeset = drm_atomic_crtc_needs_modeset(state->crtc->state);
fb_is_interlaced = !!(fb->flags & DRM_MODE_FB_INTERLACED);
again:
need_fetcheco = false;
prefetch_start = false;
uv_prefetch_start = false;
source = update_aux_source ? dpstate->aux_source : dpstate->source;
blend = update_aux_source ? dpstate->aux_blend : dpstate->blend;
stage = update_aux_source ? dpstate->aux_stage : dpstate->stage;
use_prefetch = update_aux_source ?
dpstate->use_aux_prefetch : dpstate->use_prefetch;
if (crtc_use_pc) {
if (update_aux_source) {
stream_id = 1;
layer_x = dpstate->right_layer_x;
} else {
stream_id = dpstate->left_src_w ? 0 : 1;
layer_x = dpstate->left_src_w ?
dpstate->left_layer_x : dpstate->right_layer_x;
}
} else {
stream_id = dplane->stream_id;
layer_x = dpstate->layer_x;
}
fg = res->fg[stream_id];
fu = source_to_fu(res, source);
if (!fu)
return;
dprc = fu->dprc;
lb_id = blend_to_id(blend);
if (lb_id < 0)
return;
lb = res->lb[lb_id];
if (crtc_use_pc) {
if (update_aux_source || !dpstate->left_src_w)
src_w = dpstate->right_src_w;
else
src_w = dpstate->left_src_w;
} else {
src_w = state->src_w >> 16;
}
src_h = state->src_h >> 16;
if (crtc_use_pc && update_aux_source) {
if (fb->modifier)
src_x = (state->src_x >> 16) + dpstate->left_src_w;
else
src_x = 0;
} else {
src_x = fb->modifier ? (state->src_x >> 16) : 0;
}
src_y = fb->modifier ? (state->src_y >> 16) : 0;
if (fetchunit_is_fetchdecode(fu)) {
if (fetchdecode_need_fetcheco(fu, fb->format->format)) {
need_fetcheco = true;
fe = fetchdecode_get_fetcheco(fu);
if (IS_ERR(fe))
return;
}
/* horizontal scaling only applies here when the pixel combiner is unused */
if ((src_w != state->crtc_w) && !crtc_use_pc) {
need_hscaler = true;
hs = fetchdecode_get_hscaler(fu);
if (IS_ERR(hs))
return;
}
if ((src_h != state->crtc_h) || fb_is_interlaced) {
need_vscaler = true;
vs = fetchdecode_get_vscaler(fu);
if (IS_ERR(vs))
return;
}
}
switch (fb->format->format) {
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
bpp = 16;
break;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
bpp = 8;
break;
default:
bpp = drm_format_plane_cpp(fb->format->format, 0) * 8;
break;
}
switch (fb->modifier) {
case DRM_FORMAT_MOD_AMPHION_TILED:
mt_w = 8;
mt_h = 8;
break;
case DRM_FORMAT_MOD_VIVANTE_TILED:
case DRM_FORMAT_MOD_VIVANTE_SUPER_TILED:
mt_w = (bpp == 16) ? 8 : 4;
mt_h = 4;
break;
default:
break;
}
baseaddr = drm_plane_state_to_baseaddr(state, update_aux_source);
if (need_fetcheco)
uv_baseaddr = drm_plane_state_to_uvbaseaddr(state,
update_aux_source);
if (use_prefetch &&
(fu->ops->get_stream_id(fu) == DPU_PLANE_SRC_DISABLED ||
need_modeset))
prefetch_start = true;
fu->ops->set_burstlength(fu, src_x, mt_w, bpp, baseaddr, use_prefetch);
fu->ops->set_src_bpp(fu, bpp);
fu->ops->set_src_stride(fu, src_w, src_x, mt_w, bpp, fb->pitches[0],
baseaddr, use_prefetch);
fu->ops->set_src_buf_dimensions(fu, src_w, src_h, 0, fb_is_interlaced);
fu->ops->set_fmt(fu, fb->format->format, fb_is_interlaced);
fu->ops->enable_src_buf(fu);
fu->ops->set_framedimensions(fu, src_w, src_h, fb_is_interlaced);
fu->ops->set_baseaddress(fu, src_w, src_x, src_y, mt_w, mt_h, bpp,
baseaddr);
fu->ops->set_stream_id(fu, stream_id ?
DPU_PLANE_SRC_TO_DISP_STREAM1 :
DPU_PLANE_SRC_TO_DISP_STREAM0);
fu->ops->unpin_off(fu);
dev_dbg(dev, "[PLANE:%d:%s] %s-0x%02x\n",
plane->base.id, plane->name, fu->name, fu->id);
if (need_fetcheco) {
fe_id = fetcheco_get_block_id(fe);
if (fe_id == ID_NONE)
return;
if (use_prefetch &&
(fe->ops->get_stream_id(fe) == DPU_PLANE_SRC_DISABLED ||
need_modeset))
uv_prefetch_start = true;
fetchdecode_pixengcfg_dynamic_src_sel(fu,
(fd_dynamic_src_sel_t)fe_id);
fe->ops->set_burstlength(fe, src_x, mt_w, bpp, uv_baseaddr,
use_prefetch);
fe->ops->set_src_bpp(fe, 16);
fe->ops->set_src_stride(fe, src_w, src_x, mt_w, bpp,
fb->pitches[1],
uv_baseaddr, use_prefetch);
fe->ops->set_fmt(fe, fb->format->format, fb_is_interlaced);
fe->ops->set_src_buf_dimensions(fe, src_w, src_h,
fb->format->format,
fb_is_interlaced);
fe->ops->set_framedimensions(fe, src_w, src_h,
fb_is_interlaced);
fe->ops->set_baseaddress(fe, src_w, src_x, src_y / 2,
mt_w, mt_h, bpp, uv_baseaddr);
fe->ops->enable_src_buf(fe);
fe->ops->set_stream_id(fe, stream_id ?
DPU_PLANE_SRC_TO_DISP_STREAM1 :
DPU_PLANE_SRC_TO_DISP_STREAM0);
fe->ops->unpin_off(fe);
dev_dbg(dev, "[PLANE:%d:%s] %s-0x%02x\n",
plane->base.id, plane->name, fe->name, fe_id);
} else {
if (fetchunit_is_fetchdecode(fu))
fetchdecode_pixengcfg_dynamic_src_sel(fu,
FD_SRC_DISABLE);
}
/* vscaler comes first */
if (need_vscaler) {
vs_id = vscaler_get_block_id(vs);
if (vs_id == ID_NONE)
return;
vscaler_pixengcfg_dynamic_src_sel(vs, (vs_src_sel_t)source);
vscaler_pixengcfg_clken(vs, CLKEN__AUTOMATIC);
vscaler_setup1(vs, src_h, state->crtc_h, fb_is_interlaced);
vscaler_setup2(vs, fb_is_interlaced);
vscaler_setup3(vs, fb_is_interlaced);
vscaler_output_size(vs, state->crtc_h);
vscaler_field_mode(vs, fb_is_interlaced ?
SCALER_ALWAYS0 : SCALER_INPUT);
vscaler_filter_mode(vs, SCALER_LINEAR);
vscaler_scale_mode(vs, SCALER_UPSCALE);
vscaler_mode(vs, SCALER_ACTIVE);
vscaler_set_stream_id(vs, dplane->stream_id ?
DPU_PLANE_SRC_TO_DISP_STREAM1 :
DPU_PLANE_SRC_TO_DISP_STREAM0);
source = (lb_sec_sel_t)vs_id;
dev_dbg(dev, "[PLANE:%d:%s] vscaler-0x%02x\n",
plane->base.id, plane->name, vs_id);
}
/* and then the hscaler */
if (need_hscaler) {
hs_id = hscaler_get_block_id(hs);
if (hs_id == ID_NONE)
return;
hscaler_pixengcfg_dynamic_src_sel(hs, need_vscaler ?
(hs_src_sel_t)vs_id :
(hs_src_sel_t)source);
hscaler_pixengcfg_clken(hs, CLKEN__AUTOMATIC);
hscaler_setup1(hs, src_w, state->crtc_w);
hscaler_output_size(hs, state->crtc_w);
hscaler_filter_mode(hs, SCALER_LINEAR);
hscaler_scale_mode(hs, SCALER_UPSCALE);
hscaler_mode(hs, SCALER_ACTIVE);
hscaler_set_stream_id(hs, dplane->stream_id ?
DPU_PLANE_SRC_TO_DISP_STREAM1 :
DPU_PLANE_SRC_TO_DISP_STREAM0);
source = (lb_sec_sel_t)hs_id;
dev_dbg(dev, "[PLANE:%d:%s] hscaler-0x%02x\n",
plane->base.id, plane->name, hs_id);
}
if (use_prefetch) {
dprc_configure(dprc, stream_id,
src_w, src_h, src_x, src_y,
fb->pitches[0], fb->format->format,
fb->modifier, baseaddr, uv_baseaddr,
prefetch_start, uv_prefetch_start,
fb_is_interlaced);
if (prefetch_start || uv_prefetch_start)
dprc_enable(dprc);
dprc_reg_update(dprc);
if (prefetch_start || uv_prefetch_start) {
dprc_first_frame_handle(dprc);
if (!need_modeset && is_overlay)
framegen_wait_for_frame_counter_moving(fg);
}
if (update_aux_source)
dev_dbg(dev, "[PLANE:%d:%s] use aux prefetch\n",
plane->base.id, plane->name);
else
dev_dbg(dev, "[PLANE:%d:%s] use prefetch\n",
plane->base.id, plane->name);
} else if (dprc) {
dprc_disable(dprc);
if (update_aux_source)
dev_dbg(dev, "[PLANE:%d:%s] bypass aux prefetch\n",
plane->base.id, plane->name);
else
dev_dbg(dev, "[PLANE:%d:%s] bypass prefetch\n",
plane->base.id, plane->name);
}
layerblend_pixengcfg_dynamic_prim_sel(lb, stage);
layerblend_pixengcfg_dynamic_sec_sel(lb, source);
layerblend_control(lb, LB_BLEND);
layerblend_blendcontrol(lb, need_hscaler || need_vscaler);
layerblend_pixengcfg_clken(lb, CLKEN__AUTOMATIC);
layerblend_position(lb, layer_x, dpstate->layer_y);
if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
unsigned int base_w, base_x;
cf = res->cf[stream_id];
if (crtc_use_pc) {
if (update_aux_source || !dpstate->left_crtc_w) {
base_w = dpstate->right_base_w;
base_x = dpstate->right_base_x;
} else {
base_w = dpstate->left_base_w;
base_x = dpstate->left_base_x;
}
if (!dpstate->left_crtc_w || !dpstate->right_crtc_w) {
aux_cf = dpu_aux_cf_peek(cf);
aux_fg = dpu_aux_fg_peek(fg);
constframe_framedimensions_copy_prim(aux_cf);
constframe_constantcolor(aux_cf, 0, 0, 0, 0);
framegen_sacfg(aux_fg, 0, 0);
}
} else {
base_w = dpstate->base_w;
base_x = dpstate->base_x;
}
constframe_framedimensions(cf, base_w, dpstate->base_h);
constframe_constantcolor(cf, 0, 0, 0, 0);
framegen_sacfg(fg, base_x, dpstate->base_y);
}
if (crtc_use_pc) {
if ((!stream_id && dpstate->is_left_top) ||
(stream_id && dpstate->is_right_top)) {
ed = res->ed[stream_id];
extdst_pixengcfg_src_sel(ed, (extdst_src_sel_t)blend);
}
} else {
if (dpstate->is_top) {
ed = res->ed[stream_id];
extdst_pixengcfg_src_sel(ed, (extdst_src_sel_t)blend);
}
}
if (update_aux_source)
dev_dbg(dev, "[PLANE:%d:%s] *aux* source-0x%02x stage-0x%02x blend-0x%02x\n",
plane->base.id, plane->name,
source, stage, blend);
else
dev_dbg(dev, "[PLANE:%d:%s] source-0x%02x stage-0x%02x blend-0x%02x\n",
plane->base.id, plane->name,
source, stage, blend);
if (dpstate->need_aux_source && !update_aux_source) {
update_aux_source = true;
goto again;
}
}
static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = {
.prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = dpu_plane_atomic_check,
.atomic_update = dpu_plane_atomic_update,
};
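/*
 * Create a DPU plane.  Primary planes get an immutable zpos of 0;
 * overlay planes get a mutable zpos within [1, hw_plane_num - 1].
 */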
struct dpu_plane *dpu_plane_init(struct drm_device *drm,
unsigned int possible_crtcs,
unsigned int stream_id,
struct dpu_plane_grp *grp,
enum drm_plane_type type)
{
struct dpu_plane *dpu_plane;
struct drm_plane *plane;
unsigned int ov_num;
int ret;
dpu_plane = kzalloc(sizeof(*dpu_plane), GFP_KERNEL);
if (!dpu_plane)
return ERR_PTR(-ENOMEM);
dpu_plane->stream_id = stream_id;
dpu_plane->grp = grp;
plane = &dpu_plane->base;
if (type == DRM_PLANE_TYPE_PRIMARY)
ret = drm_universal_plane_init(drm, plane, possible_crtcs,
&dpu_plane_funcs,
dpu_primary_formats,
ARRAY_SIZE(dpu_primary_formats),
dpu_format_modifiers,
type, NULL);
else
ret = drm_universal_plane_init(drm, plane, possible_crtcs,
&dpu_plane_funcs,
dpu_overlay_formats,
ARRAY_SIZE(dpu_overlay_formats),
dpu_format_modifiers,
type, NULL);
if (ret) {
kfree(dpu_plane);
return ERR_PTR(ret);
}
drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
switch (type) {
case DRM_PLANE_TYPE_PRIMARY:
ret = drm_plane_create_zpos_immutable_property(plane, 0);
break;
case DRM_PLANE_TYPE_OVERLAY:
/* filter out the primary plane */
ov_num = grp->hw_plane_num - 1;
ret = drm_plane_create_zpos_property(plane, 1, 1, ov_num);
break;
default:
ret = -EINVAL;
}
if (ret) {
kfree(dpu_plane);
return ERR_PTR(ret);
}
return dpu_plane;
}
/*
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#ifndef __DPU_PLANE_H__
#define __DPU_PLANE_H__
#include <video/dpu.h>
#include "imx-drm.h"
#define MAX_DPU_PLANE_GRP (MAX_CRTC / 2)
enum dpu_plane_src_type {
DPU_PLANE_SRC_FL,
DPU_PLANE_SRC_FW,
DPU_PLANE_SRC_FD,
};
struct dpu_plane {
struct drm_plane base;
struct dpu_plane_grp *grp;
struct list_head head;
unsigned int stream_id;
};
struct dpu_plane_state {
struct drm_plane_state base;
lb_prim_sel_t stage;
lb_sec_sel_t source;
dpu_block_id_t blend;
lb_prim_sel_t aux_stage;
lb_sec_sel_t aux_source;
dpu_block_id_t aux_blend;
unsigned int layer_x;
unsigned int layer_y;
unsigned int base_x;
unsigned int base_y;
unsigned int base_w;
unsigned int base_h;
bool is_top;
bool use_prefetch;
bool use_aux_prefetch;
bool need_aux_source;
/* used when pixel combiner is needed */
unsigned int left_layer_x;
unsigned int left_base_x;
unsigned int left_base_w;
unsigned int left_src_w;
unsigned int left_crtc_w;
unsigned int right_layer_x;
unsigned int right_base_x;
unsigned int right_base_w;
unsigned int right_src_w;
unsigned int right_crtc_w;
bool is_left_top;
bool is_right_top;
};
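/*
 * Routing tables used to build a blending chain: constframe stages
 * (one per display stream), layerblend stages, fetchunit sources, and
 * the corresponding layerblend block IDs.
 */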
static const lb_prim_sel_t cf_stages[] = {LB_PRIM_SEL__CONSTFRAME0,
LB_PRIM_SEL__CONSTFRAME1};
static const lb_prim_sel_t stages[] = {LB_PRIM_SEL__LAYERBLEND0,
LB_PRIM_SEL__LAYERBLEND1,
LB_PRIM_SEL__LAYERBLEND2,
LB_PRIM_SEL__LAYERBLEND3,
LB_PRIM_SEL__LAYERBLEND4,
LB_PRIM_SEL__LAYERBLEND5};
/* FIXME: Correct the source entries for subsidiary layers. */
static const lb_sec_sel_t sources[] = {LB_SEC_SEL__FETCHLAYER0,
LB_SEC_SEL__FETCHLAYER1,
LB_SEC_SEL__FETCHWARP2,
LB_SEC_SEL__FETCHDECODE0,
LB_SEC_SEL__FETCHDECODE1,
LB_SEC_SEL__FETCHDECODE2,
LB_SEC_SEL__FETCHDECODE3};
static const dpu_block_id_t blends[] = {ID_LAYERBLEND0, ID_LAYERBLEND1,
ID_LAYERBLEND2, ID_LAYERBLEND3,
ID_LAYERBLEND4, ID_LAYERBLEND5};
static inline struct dpu_plane *to_dpu_plane(struct drm_plane *plane)
{
return container_of(plane, struct dpu_plane, base);
}
static inline struct dpu_plane_state *
to_dpu_plane_state(struct drm_plane_state *plane_state)
{
return container_of(plane_state, struct dpu_plane_state, base);
}
static inline int source_to_type(lb_sec_sel_t source)
{
switch (source) {
case LB_SEC_SEL__FETCHLAYER0:
case LB_SEC_SEL__FETCHLAYER1:
return DPU_PLANE_SRC_FL;
case LB_SEC_SEL__FETCHWARP2:
return DPU_PLANE_SRC_FW;
case LB_SEC_SEL__FETCHDECODE0:
case LB_SEC_SEL__FETCHDECODE1:
case LB_SEC_SEL__FETCHDECODE2:
case LB_SEC_SEL__FETCHDECODE3:
return DPU_PLANE_SRC_FD;
default:
break;
}
WARN_ON(1);
return -EINVAL;
}
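/*
 * Translate a source selector into an index within its fetchunit type,
 * i.e. the offset from the first FetchLayer/FetchWarp/FetchDecode
 * entry in sources[].
 */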
static inline int source_to_id(lb_sec_sel_t source)
{
int i, offset = 0;
int type = source_to_type(source);
for (i = 0; i < ARRAY_SIZE(sources); i++) {
if (source != sources[i])
continue;
/* FetchLayer */
if (type == DPU_PLANE_SRC_FL)
return i;
/* FetchWarp or FetchDecode */
while (offset < ARRAY_SIZE(sources)) {
if (source_to_type(sources[offset]) == type)
break;
offset++;
}
return i - offset;
}
WARN_ON(1);
return -EINVAL;
}
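/* Look up the fetchunit instance backing a layerblend secondary source. */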
static inline struct dpu_fetchunit *
source_to_fu(struct dpu_plane_res *res, lb_sec_sel_t source)
{
int fu_type = source_to_type(source);
int fu_id = source_to_id(source);
if (fu_type < 0 || fu_id < 0)
return NULL;
switch (fu_type) {
case DPU_PLANE_SRC_FD:
return res->fd[fu_id];
case DPU_PLANE_SRC_FL:
return res->fl[fu_id];
case DPU_PLANE_SRC_FW:
return res->fw[fu_id];
}
return NULL;
}
static inline struct dpu_fetchunit *
dpstate_to_fu(struct dpu_plane_state *dpstate)
{
struct drm_plane *plane = dpstate->base.plane;
struct dpu_plane *dplane = to_dpu_plane(plane);
struct dpu_plane_res *res = &dplane->grp->res;
return source_to_fu(res, dpstate->source);
}
static inline int blend_to_id(dpu_block_id_t blend)
{
int i;
for (i = 0; i < ARRAY_SIZE(blends); i++) {
if (blend == blends[i])
return i;
}
WARN_ON(1);
return -EINVAL;
}
static inline bool drm_format_is_yuv(uint32_t format)
{
switch (format) {
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
return true;
default:
break;
}
return false;
}
struct dpu_plane *dpu_plane_init(struct drm_device *drm,
unsigned int possible_crtcs,
unsigned int stream_id,
struct dpu_plane_grp *grp,
enum drm_plane_type type);
#endif
......@@ -27,6 +27,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_of.h>
#include <video/dpu.h>
#include <video/imx-ipu-v3.h>
#include <video/imx-lcdif.h>
#include <video/imx-dcss.h>
......@@ -135,6 +136,10 @@ static int compare_of(struct device *dev, void *data)
} else if (strcmp(dev->driver->name, "imx-dcss-crtc") == 0) {
struct dcss_client_platformdata *pdata = dev->platform_data;
return pdata->of_node == np;
} else if (strcmp(dev->driver->name, "imx-dpu-crtc") == 0) {
struct dpu_client_platformdata *pdata = dev->platform_data;
return pdata->of_node == np;
}
......@@ -147,6 +152,11 @@ static int compare_of(struct device *dev, void *data)
return dev->of_node == np;
}
static const char *const imx_drm_dpu_comp_parents[] = {
"fsl,imx8qm-dpu",
"fsl,imx8qxp-dpu",
};
static const char *const imx_drm_dcss_comp_parents[] = {
"nxp,imx8mq-dcss",
};
......@@ -179,6 +189,12 @@ static bool imx_drm_parent_is_compatible(struct device *dev,
return ret;
}
static inline bool has_dpu(struct device *dev)
{
return imx_drm_parent_is_compatible(dev, imx_drm_dpu_comp_parents,
ARRAY_SIZE(imx_drm_dpu_comp_parents));
}
static inline bool has_dcss(struct device *dev)
{
return imx_drm_parent_is_compatible(dev, imx_drm_dcss_comp_parents,
......@@ -204,12 +220,22 @@ static int imx_drm_bind(struct device *dev)
imxdrm->drm = drm;
drm->dev_private = imxdrm;
if (has_dpu(dev)) {
imxdrm->dpu_nonblock_commit_wq =
alloc_workqueue("dpu_nonblock_commit_wq",
WQ_UNBOUND | WQ_FREEZABLE, 0);
if (!imxdrm->dpu_nonblock_commit_wq) {
ret = -ENOMEM;
goto err_wq;
}
}
if (has_dcss(dev)) {
imxdrm->dcss_nonblock_commit_wq =
alloc_ordered_workqueue("dcss_nonblock_commit_wq", 0);
if (!imxdrm->dcss_nonblock_commit_wq) {
ret = -ENOMEM;
goto err_unref;
goto err_wq;
}
}
......@@ -236,7 +262,7 @@ static int imx_drm_bind(struct device *dev)
drm->mode_config.max_width = 4096;
drm->mode_config.max_height = 4096;
if (has_dcss(dev)) {
if (has_dpu(dev) || has_dcss(dev)) {
drm->mode_config.allow_fb_modifiers = true;
dev_dbg(dev, "allow fb modifiers\n");
}
......@@ -293,10 +319,11 @@ err_unbind:
component_unbind_all(drm->dev, drm);
err_kms:
drm_mode_config_cleanup(drm);
err_wq:
if (imxdrm->dcss_nonblock_commit_wq)
destroy_workqueue(imxdrm->dcss_nonblock_commit_wq);
if (imxdrm->dpu_nonblock_commit_wq)
destroy_workqueue(imxdrm->dpu_nonblock_commit_wq);
err_unref:
drm_dev_put(drm);
......@@ -308,6 +335,9 @@ static void imx_drm_unbind(struct device *dev)
struct drm_device *drm = dev_get_drvdata(dev);
struct imx_drm_device *imxdrm = drm->dev_private;
if (has_dpu(dev))
flush_workqueue(imxdrm->dpu_nonblock_commit_wq);
if (has_dcss(dev))
flush_workqueue(imxdrm->dcss_nonblock_commit_wq);
......@@ -322,6 +352,9 @@ static void imx_drm_unbind(struct device *dev)
component_unbind_all(drm->dev, drm);
dev_set_drvdata(dev, NULL);
if (has_dpu(dev))
destroy_workqueue(imxdrm->dpu_nonblock_commit_wq);
if (has_dcss(dev))
destroy_workqueue(imxdrm->dcss_nonblock_commit_wq);
......
......@@ -20,6 +20,7 @@ struct imx_drm_device {
struct drm_fbdev_cma *fbhelper;
struct drm_atomic_state *state;
struct workqueue_struct *dpu_nonblock_commit_wq;
struct workqueue_struct *dcss_nonblock_commit_wq;
struct {
wait_queue_head_t wait;
......