diff --git a/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt b/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt index 681da9b42cd4c9864564df13472735b91c3fb649..31ce11a17a4a801ddb6b1743726ab847938ecd0a 100644 --- a/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt +++ b/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt @@ -226,6 +226,84 @@ dcss_drm: dcss@0x32e00000 { }; }; +Freescale i.MX DPU +==================== + +Required properties: +- compatible: Should be "fsl,<chip>-dpu" +- reg: should be register base and length as documented in the + datasheet +- intsteer: phandle pointing to interrupt steer. +- interrupts, interrupt-names: Should contain interrupts and names as + documented in the datasheet. +- clocks, clock-names: phandles to the DPU clocks described in + Documentation/devicetree/bindings/clock/clock-bindings.txt + The following clocks are expected on i.MX8qm and i.MX8qxp: + "pll0" - PLL clock for display interface 0 + "pll1" - PLL clock for display interface 1 + "disp0" - pixel clock for display interface 0 + "disp1" - pixel clock for display interface 1 + The needed clock numbers for each are documented in + Documentation/devicetree/bindings/clock/imx8qm-clock.txt, and in + Documentation/devicetree/bindings/clock/imx8qxp-clock.txt. +- power-domains: phandle pointing to power domain. +- fsl,dpr-channels: phandles to the DPR channels attached to this DPU, + sorted by memory map addresses. Only valid for i.MX8qm and i.MX8qxp. +- fsl,pixel-combiner: phandle to the pixel combiner unit attached to this DPU. +Optional properties: +- port@[0-1]: Port nodes with endpoint definitions as defined in + Documentation/devicetree/bindings/media/video-interfaces.txt. + ports 0 and 1 should correspond to display interface 0 and + display interface 1, respectively. 
+ +example: + +dpu: dpu@56180000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,imx8qm-dpu"; + reg = <0x0 0x56180000 0x0 0x40000>; + intsteer = <&dpu1_intsteer>; + interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "irq_common", + "irq_stream0a", + "irq_stream0b", + "irq_stream1a", + "irq_stream1b", + "irq_reserved0", + "irq_reserved1", + "irq_blit"; + clocks = <&clk IMX8QM_DC0_PLL0_CLK>, + <&clk IMX8QM_DC0_PLL1_CLK>, + <&clk IMX8QM_DC0_DISP0_CLK>, + <&clk IMX8QM_DC0_DISP1_CLK>; + clock-names = "pll0", "pll1", "disp0", "disp1"; + power-domains = <&pd_dc0>; + fsl,dpr-channels = <&dpr1_channel1>, <&dpr1_channel2>, + <&dpr1_channel3>, <&dpr2_channel1>, + <&dpr2_channel2>, <&dpr2_channel3>; + fsl,pixel-combiner = <&pixel_combiner1>; + + dpu1_disp1: port@1 { + reg = <1>; + + dpu1_disp1_lvds0: lvds0-endpoint { + remote-endpoint = <&ldb1_lvds0>; + }; + + dpu1_disp1_lvds1: lvds1-endpoint { + remote-endpoint = <&ldb1_lvds1>; + }; + }; +}; + Freescale i.MX8 PC (Pixel Combiner) ============================================= Required properties: diff --git a/drivers/gpu/imx/Kconfig b/drivers/gpu/imx/Kconfig index f3184aec15d3648f2f50c720559b0df61110f7de..f639890544da414a239b9a0b1430b2ca19bc9e7e 100644 --- a/drivers/gpu/imx/Kconfig +++ b/drivers/gpu/imx/Kconfig @@ -16,3 +16,4 @@ config IMX8_DPRC source drivers/gpu/imx/ipu-v3/Kconfig source drivers/gpu/imx/lcdif/Kconfig source drivers/gpu/imx/dcss/Kconfig +source drivers/gpu/imx/dpu/Kconfig diff --git a/drivers/gpu/imx/Makefile b/drivers/gpu/imx/Makefile index 37701682e5b47baad98b05fdb3bc97f8849e40a2..b8766e1ef951e0b050bbc7b2986a14214f613748 100644 --- a/drivers/gpu/imx/Makefile +++ b/drivers/gpu/imx/Makefile @@ -5,3 +5,4 @@ 
obj-$(CONFIG_IMX8_DPRC) += imx8_dprc.o obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/ obj-$(CONFIG_IMX_LCDIF_CORE) += lcdif/ obj-$(CONFIG_IMX_DCSS_CORE) += dcss/ +obj-$(CONFIG_IMX_DPU_CORE) += dpu/ diff --git a/drivers/gpu/imx/dpu/Kconfig b/drivers/gpu/imx/dpu/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..7ccba746d6d68cf87287e0aa512b1a3068378fa1 --- /dev/null +++ b/drivers/gpu/imx/dpu/Kconfig @@ -0,0 +1,12 @@ +config IMX_DPU_CORE + tristate "i.MX DPU core support" + depends on ARCH_FSL_IMX8QM || ARCH_FSL_IMX8QXP + select RESET_CONTROLLER + select GENERIC_IRQ_CHIP + select IMX8_PC + select IMX8_PRG + select IMX8_DPRC + help + Choose this if you have a Freescale i.MX8QM or i.MX8QXP system and + want to use the Display Processing Unit. This option only enables + DPU base support. diff --git a/drivers/gpu/imx/dpu/Makefile b/drivers/gpu/imx/dpu/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..4563bacf6398e81b4b8b26548d0ab68e945eabe3 --- /dev/null +++ b/drivers/gpu/imx/dpu/Makefile @@ -0,0 +1,7 @@ +obj-$(CONFIG_IMX_DPU_CORE) += imx-dpu-core.o + +imx-dpu-core-objs := dpu-common.o dpu-constframe.o dpu-disengcfg.o \ + dpu-extdst.o dpu-fetchdecode.o dpu-fetcheco.o \ + dpu-fetchlayer.o dpu-fetchwarp.o dpu-fetchunit.o \ + dpu-framegen.o dpu-hscaler.o dpu-layerblend.o \ + dpu-store.o dpu-tcon.o dpu-vscaler.o diff --git a/drivers/gpu/imx/dpu/dpu-common.c b/drivers/gpu/imx/dpu/dpu-common.c new file mode 100644 index 0000000000000000000000000000000000000000..35cd5cbdea884c63ea829c1aeaa645350138538f --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-common.c @@ -0,0 +1,1917 @@ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ +#include <linux/clk.h> +#include <linux/fb.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_graph.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <soc/imx8/sc/sci.h> +#include <video/dpu.h> +#include <video/imx8-pc.h> +#include <video/imx8-prefetch.h> +#include "dpu-prv.h" + +static bool display_plane_video_proc = true; +module_param(display_plane_video_proc, bool, 0444); +MODULE_PARM_DESC(display_plane_video_proc, + "Enable video processing for display [default=true]"); + +#define DPU_CM_REG_DEFINE1(name1, name2) \ +static inline u32 name1(const struct cm_reg_ofs *ofs) \ +{ \ + return ofs->name2; \ +} + +#define DPU_CM_REG_DEFINE2(name1, name2) \ +static inline u32 name1(const struct cm_reg_ofs *ofs, \ + unsigned int n) \ +{ \ + return ofs->name2 + (4 * n); \ +} + +DPU_CM_REG_DEFINE1(IPIDENTIFIER, ipidentifier); + +#define DESIGNDELIVERYID_MASK 0xF0U +#define DESIGNDELIVERYID_SHIFT 4U + +#define DESIGNMATURITYLEVEL_MASK 0xF00U +#define DESIGNMATURITYLEVEL_SHIFT 8U +enum design_maturity_level { + /* Pre feasibility study. */ + DESIGNMATURITYLEVEL__PREFS = 1 << DESIGNMATURITYLEVEL_SHIFT, + /* Feasibility study. */ + DESIGNMATURITYLEVEL__FS = 2 << DESIGNMATURITYLEVEL_SHIFT, + /* Functionality complete. */ + DESIGNMATURITYLEVEL__R0 = 3 << DESIGNMATURITYLEVEL_SHIFT, + /* Verification complete. 
*/ + DESIGNMATURITYLEVEL__R1 = 4 << DESIGNMATURITYLEVEL_SHIFT, +}; + +#define IPEVOLUTION_MASK 0xF000U +#define IPEVOLUTION_SHIFT 12U + +#define IPFEATURESET_MASK 0xF0000U +#define IPFEATURESET_SHIFT 16U +enum ip_feature_set { + /* Minimal functionality (Eco). */ + IPFEATURESET__E = 1 << IPFEATURESET_SHIFT, + /* Reduced functionality (Light). */ + IPFEATURESET__L = 2 << IPFEATURESET_SHIFT, + /* Advanced functionality (Plus). */ + IPFEATURESET__P = 4 << IPFEATURESET_SHIFT, + /* Extensive functionality (eXtensive). */ + IPFEATURESET__X = 5 << IPFEATURESET_SHIFT, +}; + +#define IPAPPLICATION_MASK 0xF00000U +#define IPAPPLICATION_SHIFT 20U +enum ip_application { + /* Blit Engine only. */ + IPAPPLICATION__B = 1 << IPAPPLICATION_SHIFT, + /* Blit Engine and Display Controller. */ + IPAPPLICATION__D = 2 << IPAPPLICATION_SHIFT, + /* Display Controller only (with direct capture). */ + IPAPPLICATION__V = 3 << IPAPPLICATION_SHIFT, + /* + * Blit Engine, Display Controller (with direct capture), + * Capture Controller (buffered capture) and Drawing Engine. + */ + IPAPPLICATION__G = 4 << IPAPPLICATION_SHIFT, + /* Display Controller only. */ + IPAPPLICATION__C = 5 << IPAPPLICATION_SHIFT, +}; + +#define IPCONFIGURATION_MASK 0xF000000U +#define IPCONFIGURATION_SHIFT 24U +enum ip_configuration { + /* Graphics core only (Module). */ + IPCONFIGURATION__M = 1 << IPCONFIGURATION_SHIFT, + /* Subsystem including a graphics core (System). */ + IPCONFIGURATION__S = 2 << IPCONFIGURATION_SHIFT, +}; + +#define IPFAMILY_MASK 0xF0000000U +#define IPFAMILY_SHIFT 28U +enum ip_family { + /* IMXDPU building block generation 2010. */ + IPFAMILY__IMXDPU2010 = 0, + /* IMXDPU building block generation 2012. */ + IPFAMILY__IMXDPU2012 = 1 << IPFAMILY_SHIFT, + /* IMXDPU building block generation 2013. 
*/ + IPFAMILY__IMXDPU2013 = 2 << IPFAMILY_SHIFT, +}; + +DPU_CM_REG_DEFINE1(LOCKUNLOCK, lockunlock); +DPU_CM_REG_DEFINE1(LOCKSTATUS, lockstatus); +DPU_CM_REG_DEFINE2(USERINTERRUPTMASK, userinterruptmask); +DPU_CM_REG_DEFINE2(INTERRUPTENABLE, interruptenable); +DPU_CM_REG_DEFINE2(INTERRUPTPRESET, interruptpreset); +DPU_CM_REG_DEFINE2(INTERRUPTCLEAR, interruptclear); +DPU_CM_REG_DEFINE2(INTERRUPTSTATUS, interruptstatus); +DPU_CM_REG_DEFINE2(USERINTERRUPTENABLE, userinterruptenable); +DPU_CM_REG_DEFINE2(USERINTERRUPTPRESET, userinterruptpreset); +DPU_CM_REG_DEFINE2(USERINTERRUPTCLEAR, userinterruptclear); +DPU_CM_REG_DEFINE2(USERINTERRUPTSTATUS, userinterruptstatus); +DPU_CM_REG_DEFINE1(GENERALPURPOSE, generalpurpose); + +static inline u32 dpu_cm_read(struct dpu_soc *dpu, unsigned int offset) +{ + return readl(dpu->cm_reg + offset); +} + +static inline void dpu_cm_write(struct dpu_soc *dpu, u32 value, + unsigned int offset) +{ + writel(value, dpu->cm_reg + offset); +} + +/* Constant Frame Unit */ +static const unsigned long cf_ofss[] = {0x4400, 0x5400, 0x4c00, 0x5c00}; +static const unsigned long cf_pec_ofss_v1[] = {0x980, 0xa00, 0x9c0, 0xa40}; +static const unsigned long cf_pec_ofss_v2[] = {0x960, 0x9e0, 0x9a0, 0xa20}; + +/* Display Engine Configuration Unit */ +static const unsigned long dec_ofss_v1[] = {0x10000, 0x10020}; +static const unsigned long dec_ofss_v2[] = {0xb400, 0xb420}; + +/* External Destination Unit */ +static const unsigned long ed_ofss[] = {0x4800, 0x5800, 0x5000, 0x6000}; +static const unsigned long ed_pec_ofss_v1[] = {0x9a0, 0xa20, 0x9e0, 0xa60}; +static const unsigned long ed_pec_ofss_v2[] = {0x980, 0xa00, 0x9c0, 0xa40}; + +/* Fetch Decode Unit */ +static const unsigned long fd_ofss_v1[] = {0x8c00, 0x9800, 0x7400, 0x7c00}; +static const unsigned long fd_ofss_v2[] = {0x6c00, 0x7800}; +static const unsigned long fd_pec_ofss_v1[] = {0xb60, 0xb80, 0xb00, 0xb20}; +static const unsigned long fd_pec_ofss_v2[] = {0xa80, 0xaa0}; + +/* Fetch ECO Unit */ 
+static const unsigned long fe_ofss_v1[] = {0x9400, 0xa000, 0x8800, 0x1c00}; +static const unsigned long fe_ofss_v2[] = {0x7400, 0x8000, 0x6800, 0x1c00}; +static const unsigned long fe_pec_ofss_v1[] = {0xb70, 0xb90, 0xb50, 0x870}; +static const unsigned long fe_pec_ofss_v2[] = {0xa90, 0xab0, 0xa70, 0x850}; + +/* Frame Generator Unit */ +static const unsigned long fg_ofss_v1[] = {0x10c00, 0x12800}; +static const unsigned long fg_ofss_v2[] = {0xb800, 0xd400}; + +/* Fetch Layer Unit */ +static const unsigned long fl_ofss_v1[] = {0xa400, 0xac00}; +static const unsigned long fl_ofss_v2[] = {0x8400}; +static const unsigned long fl_pec_ofss_v1[] = {0xba0, 0xbb0}; +static const unsigned long fl_pec_ofss_v2[] = {0xac0}; + +/* Fetch Warp Unit */ +static const unsigned long fw_ofss_v1[] = {0x8400}; +static const unsigned long fw_ofss_v2[] = {0x6400}; +static const unsigned long fw_pec_ofss_v1[] = {0xb40}; +static const unsigned long fw_pec_ofss_v2[] = {0xa60}; + +/* Horizontal Scaler Unit */ +static const unsigned long hs_ofss_v1[] = {0xbc00, 0xd000, 0x3000}; +static const unsigned long hs_ofss_v2[] = {0x9000, 0x9c00, 0x3000}; +static const unsigned long hs_pec_ofss_v1[] = {0xc00, 0xca0, 0x8e0}; +static const unsigned long hs_pec_ofss_v2[] = {0xb00, 0xb60, 0x8c0}; + +/* Layer Blend Unit */ +static const unsigned long lb_ofss_v1[] = {0xdc00, 0xe000, 0xe400, 0xe800, + 0xec00, 0xf000, 0xf400}; +static const unsigned long lb_ofss_v2[] = {0xa400, 0xa800, 0xac00, 0xb000}; +static const unsigned long lb_pec_ofss_v1[] = {0xd00, 0xd20, 0xd40, 0xd60, + 0xd80, 0xda0, 0xdc0}; +static const unsigned long lb_pec_ofss_v2[] = {0xba0, 0xbc0, 0xbe0, 0xc00}; + +/* Store Unit */ +static const unsigned long st_ofss_v1[] = {0x4000}; +static const unsigned long st_ofss_v2[] = {0x4000}; +static const unsigned long st_pec_ofss_v1[] = {0x960}; +static const unsigned long st_pec_ofss_v2[] = {0x940}; + +/* Timing Controller Unit */ +static const unsigned long tcon_ofss_v1[] = {0x12000, 0x13c00}; +static 
const unsigned long tcon_ofss_v2[] = {0xcc00, 0xe800}; + +/* Vertical Scaler Unit */ +static const unsigned long vs_ofss_v1[] = {0xc000, 0xd400, 0x3400}; +static const unsigned long vs_ofss_v2[] = {0x9400, 0xa000, 0x3400}; +static const unsigned long vs_pec_ofss_v1[] = {0xc20, 0xcc0, 0x900}; +static const unsigned long vs_pec_ofss_v2[] = {0xb20, 0xb80, 0x8e0}; + +static const struct dpu_unit cfs_v1 = { + .name = "ConstFrame", + .num = ARRAY_SIZE(cf_ids), + .ids = cf_ids, + .pec_ofss = cf_pec_ofss_v1, + .ofss = cf_ofss, +}; + +static const struct dpu_unit cfs_v2 = { + .name = "ConstFrame", + .num = ARRAY_SIZE(cf_ids), + .ids = cf_ids, + .pec_ofss = cf_pec_ofss_v2, + .ofss = cf_ofss, +}; + +static const struct dpu_unit decs_v1 = { + .name = "DisEngCfg", + .num = ARRAY_SIZE(dec_ids), + .ids = dec_ids, + .pec_ofss = NULL, + .ofss = dec_ofss_v1, +}; + +static const struct dpu_unit decs_v2 = { + .name = "DisEngCfg", + .num = ARRAY_SIZE(dec_ids), + .ids = dec_ids, + .pec_ofss = NULL, + .ofss = dec_ofss_v2, +}; + +static const struct dpu_unit eds_v1 = { + .name = "ExtDst", + .num = ARRAY_SIZE(ed_ids), + .ids = ed_ids, + .pec_ofss = ed_pec_ofss_v1, + .ofss = ed_ofss, +}; + +static const struct dpu_unit eds_v2 = { + .name = "ExtDst", + .num = ARRAY_SIZE(ed_ids), + .ids = ed_ids, + .pec_ofss = ed_pec_ofss_v2, + .ofss = ed_ofss, +}; + +static const struct dpu_unit fds_v1 = { + .name = "FetchDecode", + .num = ARRAY_SIZE(fd_ids), + .ids = fd_ids, + .pec_ofss = fd_pec_ofss_v1, + .ofss = fd_ofss_v1, +}; + +static const struct dpu_unit fds_v2 = { + .name = "FetchDecode", + .num = 2, + .ids = fd_ids, + .pec_ofss = fd_pec_ofss_v2, + .ofss = fd_ofss_v2, + .dprc_ids = fd_dprc_ids, +}; + +static const struct dpu_unit fes_v1 = { + .name = "FetchECO", + .num = ARRAY_SIZE(fe_ids), + .ids = fe_ids, + .pec_ofss = fe_pec_ofss_v1, + .ofss = fe_ofss_v1, +}; + +static const struct dpu_unit fes_v2 = { + .name = "FetchECO", + .num = ARRAY_SIZE(fe_ids), + .ids = fe_ids, + .pec_ofss = 
fe_pec_ofss_v2, + .ofss = fe_ofss_v2, +}; + +static const struct dpu_unit fgs_v1 = { + .name = "FrameGen", + .num = ARRAY_SIZE(fg_ids), + .ids = fg_ids, + .pec_ofss = NULL, + .ofss = fg_ofss_v1, +}; + +static const struct dpu_unit fgs_v2 = { + .name = "FrameGen", + .num = ARRAY_SIZE(fg_ids), + .ids = fg_ids, + .pec_ofss = NULL, + .ofss = fg_ofss_v2, +}; + +static const struct dpu_unit fls_v1 = { + .name = "FetchLayer", + .num = ARRAY_SIZE(fl_ids), + .ids = fl_ids, + .pec_ofss = fl_pec_ofss_v1, + .ofss = fl_ofss_v1, +}; + +static const struct dpu_unit fls_v2 = { + .name = "FetchLayer", + .num = 1, + .ids = fl_ids, + .pec_ofss = fl_pec_ofss_v2, + .ofss = fl_ofss_v2, + .dprc_ids = fl_dprc_ids, +}; + +static const struct dpu_unit fws_v1 = { + .name = "FetchWarp", + .num = ARRAY_SIZE(fw_ids), + .ids = fw_ids, + .pec_ofss = fw_pec_ofss_v1, + .ofss = fw_ofss_v1, +}; + +static const struct dpu_unit fws_v2 = { + .name = "FetchWarp", + .num = ARRAY_SIZE(fw_ids), + .ids = fw_ids, + .pec_ofss = fw_pec_ofss_v2, + .ofss = fw_ofss_v2, + .dprc_ids = fw_dprc_ids, +}; + +static const struct dpu_unit hss_v1 = { + .name = "HScaler", + .num = ARRAY_SIZE(hs_ids), + .ids = hs_ids, + .pec_ofss = hs_pec_ofss_v1, + .ofss = hs_ofss_v1, +}; + +static const struct dpu_unit hss_v2 = { + .name = "HScaler", + .num = ARRAY_SIZE(hs_ids), + .ids = hs_ids, + .pec_ofss = hs_pec_ofss_v2, + .ofss = hs_ofss_v2, +}; + +static const struct dpu_unit lbs_v1 = { + .name = "LayerBlend", + .num = ARRAY_SIZE(lb_ids), + .ids = lb_ids, + .pec_ofss = lb_pec_ofss_v1, + .ofss = lb_ofss_v1, +}; + +static const struct dpu_unit lbs_v2 = { + .name = "LayerBlend", + .num = 4, + .ids = lb_ids, + .pec_ofss = lb_pec_ofss_v2, + .ofss = lb_ofss_v2, +}; + +static const struct dpu_unit sts_v1 = { + .name = "Store", + .num = ARRAY_SIZE(st_ids), + .ids = st_ids, + .pec_ofss = st_pec_ofss_v1, + .ofss = st_ofss_v1, +}; + +static const struct dpu_unit sts_v2 = { + .name = "Store", + .num = ARRAY_SIZE(st_ids), + .ids = st_ids, + 
.pec_ofss = st_pec_ofss_v2, + .ofss = st_ofss_v2, +}; + +static const struct dpu_unit tcons_v1 = { + .name = "TCon", + .num = ARRAY_SIZE(tcon_ids), + .ids = tcon_ids, + .pec_ofss = NULL, + .ofss = tcon_ofss_v1, +}; + +static const struct dpu_unit tcons_v2 = { + .name = "TCon", + .num = ARRAY_SIZE(tcon_ids), + .ids = tcon_ids, + .pec_ofss = NULL, + .ofss = tcon_ofss_v2, +}; + +static const struct dpu_unit vss_v1 = { + .name = "VScaler", + .num = ARRAY_SIZE(vs_ids), + .ids = vs_ids, + .pec_ofss = vs_pec_ofss_v1, + .ofss = vs_ofss_v1, +}; + +static const struct dpu_unit vss_v2 = { + .name = "VScaler", + .num = ARRAY_SIZE(vs_ids), + .ids = vs_ids, + .pec_ofss = vs_pec_ofss_v2, + .ofss = vs_ofss_v2, +}; + +static const struct cm_reg_ofs cm_reg_ofs_v1 = { + .ipidentifier = 0, + .lockunlock = 0x80, + .lockstatus = 0x84, + .userinterruptmask = 0x88, + .interruptenable = 0x94, + .interruptpreset = 0xa0, + .interruptclear = 0xac, + .interruptstatus = 0xb8, + .userinterruptenable = 0x100, + .userinterruptpreset = 0x10c, + .userinterruptclear = 0x118, + .userinterruptstatus = 0x124, + .generalpurpose = 0x200, +}; + +static const struct cm_reg_ofs cm_reg_ofs_v2 = { + .ipidentifier = 0, + .lockunlock = 0x40, + .lockstatus = 0x44, + .userinterruptmask = 0x48, + .interruptenable = 0x50, + .interruptpreset = 0x58, + .interruptclear = 0x60, + .interruptstatus = 0x68, + .userinterruptenable = 0x80, + .userinterruptpreset = 0x88, + .userinterruptclear = 0x90, + .userinterruptstatus = 0x98, + .generalpurpose = 0x100, +}; + +static const unsigned int intsteer_map_v1[] = { + /* 0 1 2 3 4 5 6 7 */ /* 0~31: int0 */ + 448, 449, 450, 64, 65, 66, 67, 68, + /* 8 9 10 11 12 13 14 15 */ + 69, 70, 193, 194, 195, 196, 197, 320, + /* 16 17 18 19 20 21 22 23 */ + 321, 322, 384, 385, 386, NA, 323, NA, + /* 24 25 26 27 28 29 30 31 */ + 387, 71, 198, 72, 73, 74, 75, 76, + /* 32 33 34 35 36 37 38 39 */ /* 32~63: int1 */ + 77, 78, 79, 80, 81, 199, 200, 201, + /* 40 41 42 43 44 45 46 47 */ + 202, 203, 
204, 205, 206, 207, 208, 324, + /* 48 49 50 51 52 53 54 55 */ + 389, NA, 0, 1, 2, 3, 4, 82, + /* 56 57 58 59 60 61 62 63 */ + 83, 84, 85, 209, 210, 211, 212, 325, + /* 64 65 66 */ /* 64+: int2 */ + 326, 390, 391, +}; +static const unsigned long unused_irq_v1[] = {0x00a00000, 0x00020000, + 0xfffffff8}; + +static const unsigned int intsteer_map_v2[] = { + /* 0 1 2 3 4 5 6 7 */ /* 0~31: int0 */ + 448, 449, 450, 64, 65, 66, 67, 68, + /* 8 9 10 11 12 13 14 15 */ + 69, 70, 193, 194, 195, 196, 197, 72, + /* 16 17 18 19 20 21 22 23 */ + 73, 74, 75, 76, 77, 78, 79, 80, + /* 24 25 26 27 28 29 30 31 */ + 81, 199, 200, 201, 202, 203, 204, 205, + /* 32 33 34 35 36 37 38 39 */ /* 32+: int1 */ + 206, 207, 208, NA, 0, 1, 2, 3, + /* 40 41 42 43 44 45 46 47 */ + 4, 82, 83, 84, 85, 209, 210, 211, + /* 48 */ + 212, +}; +static const unsigned long unused_irq_v2[] = {0x00000000, 0xfffe0008}; + +static const unsigned int sw2hw_irq_map_v2[] = { + /* 0 1 2 3 4 5 6 7 */ + 0, 1, 2, 3, 4, 5, 6, 7, + /* 8 9 10 11 12 13 14 15 */ + 8, 9, 10, 11, 12, 13, 14, NA, + /* 16 17 18 19 20 21 22 23 */ + NA, NA, NA, NA, NA, NA, NA, NA, + /* 24 25 26 27 28 29 30 31 */ + NA, NA, NA, 15, 16, 17, 18, 19, + /* 32 33 34 35 36 37 38 39 */ + 20, 21, 22, 23, 24, 25, 26, 27, + /* 40 41 42 43 44 45 46 47 */ + 28, 29, 30, 31, 32, 33, 34, NA, + /* 48 49 50 51 52 53 54 55 */ + NA, NA, 36, 37, 38, 39, 40, 41, + /* 56 57 58 59 60 61 62 63 */ + 42, 43, 44, 45, 46, 47, 48, NA, + /* 64 65 66 */ + NA, NA, NA, +}; + +/* FIXME: overkill for some N/As, revive them when needed */ +static const unsigned int sw2hw_block_id_map_v2[] = { + /* 0 1 2 3 4 5 6 7 */ + 0x00, NA, NA, 0x03, NA, NA, NA, 0x07, + /* 8 9 10 11 12 13 14 15 */ + 0x08, NA, 0x0a, NA, 0x0c, NA, 0x0e, NA, + /* 16 17 18 19 20 21 22 23 */ + 0x10, NA, 0x12, NA, NA, NA, NA, NA, + /* 24 25 26 27 28 29 30 31 */ + NA, NA, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, + /* 32 33 34 35 36 37 38 39 */ + 0x1a, NA, NA, 0x1b, 0x1c, 0x1d, NA, NA, + /* 40 41 42 43 44 45 46 47 */ + 0x1e, 
0x1f, 0x20, NA, 0x21, 0x22, 0x23, 0x24, + /* 48 49 50 51 52 53 54 55 */ + NA, NA, NA, NA, NA, NA, NA, NA, + /* 56 57 58 59 60 61 62 63 */ + NA, NA, NA, NA, NA, NA, NA, NA, + /* 64 65 66 67 */ + NA, NA, NA, NA, +}; + +static const struct dpu_devtype dpu_type_v1 = { + .cm_ofs = 0x0, + .cfs = &cfs_v1, + .decs = &decs_v1, + .eds = &eds_v1, + .fds = &fds_v1, + .fes = &fes_v1, + .fgs = &fgs_v1, + .fls = &fls_v1, + .fws = &fws_v1, + .hss = &hss_v1, + .lbs = &lbs_v1, + .sts = &sts_v1, + .tcons = &tcons_v1, + .vss = &vss_v1, + .cm_reg_ofs = &cm_reg_ofs_v1, + .intsteer_map = intsteer_map_v1, + .intsteer_map_size = ARRAY_SIZE(intsteer_map_v1), + .unused_irq = unused_irq_v1, + .plane_src_na_mask = 0xffffff80, + .has_capture = true, + .has_prefetch = false, + .has_disp_sel_clk = false, + .has_dual_ldb = false, + .has_pc = false, + .has_syncmode_fixup = false, + .pixel_link_quirks = false, + .pixel_link_nhvsync = false, + .version = DPU_V1, +}; + +static const struct dpu_devtype dpu_type_v2_qm = { + .cm_ofs = 0x0, + .cfs = &cfs_v2, + .decs = &decs_v2, + .eds = &eds_v2, + .fds = &fds_v2, + .fes = &fes_v2, + .fgs = &fgs_v2, + .fls = &fls_v2, + .fws = &fws_v2, + .hss = &hss_v2, + .lbs = &lbs_v2, + .sts = &sts_v2, + .tcons = &tcons_v2, + .vss = &vss_v2, + .cm_reg_ofs = &cm_reg_ofs_v2, + .intsteer_map = intsteer_map_v2, + .intsteer_map_size = ARRAY_SIZE(intsteer_map_v2), + .unused_irq = unused_irq_v2, + .sw2hw_irq_map = sw2hw_irq_map_v2, + .sw2hw_block_id_map = sw2hw_block_id_map_v2, + .plane_src_na_mask = 0xffffffe2, + .has_capture = false, + .has_prefetch = true, + .has_disp_sel_clk = true, + .has_dual_ldb = false, + .has_pc = true, + .has_syncmode_fixup = true, + .syncmode_min_prate = 300000, + .singlemode_max_width = 1920, + .master_stream_id = 1, + .pixel_link_quirks = true, + .pixel_link_nhvsync = true, + .version = DPU_V2, +}; + +static const struct dpu_devtype dpu_type_v2_qxp = { + .cm_ofs = 0x0, + .cfs = &cfs_v2, + .decs = &decs_v2, + .eds = &eds_v2, + .fds = &fds_v2, + .fes 
= &fes_v2, + .fgs = &fgs_v2, + .fls = &fls_v2, + .fws = &fws_v2, + .hss = &hss_v2, + .lbs = &lbs_v2, + .sts = &sts_v2, + .tcons = &tcons_v2, + .vss = &vss_v2, + .cm_reg_ofs = &cm_reg_ofs_v2, + .intsteer_map = intsteer_map_v2, + .intsteer_map_size = ARRAY_SIZE(intsteer_map_v2), + .unused_irq = unused_irq_v2, + .sw2hw_irq_map = sw2hw_irq_map_v2, + .sw2hw_block_id_map = sw2hw_block_id_map_v2, + .plane_src_na_mask = 0xffffffe2, + .has_capture = false, + .has_prefetch = true, + .has_disp_sel_clk = false, + .has_dual_ldb = true, + .has_pc = true, + .has_syncmode_fixup = false, + .syncmode_min_prate = UINT_MAX, /* pc is unused */ + .singlemode_max_width = UINT_MAX, /* pc is unused */ + .pixel_link_quirks = true, + .pixel_link_nhvsync = true, + .version = DPU_V2, +}; + +static const struct of_device_id dpu_dt_ids[] = { + { + .compatible = "fsl,imx8qm-dpu", + .data = &dpu_type_v2_qm, + }, { + .compatible = "fsl,imx8qxp-dpu", + .data = &dpu_type_v2_qxp, + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(of, dpu_dt_ids); + +bool dpu_has_pc(struct dpu_soc *dpu) +{ + return dpu->devtype->has_pc; +} +EXPORT_SYMBOL_GPL(dpu_has_pc); + +unsigned int dpu_get_syncmode_min_prate(struct dpu_soc *dpu) +{ + if (dpu->devtype->has_pc) + return dpu->devtype->syncmode_min_prate; + else + return UINT_MAX; +} +EXPORT_SYMBOL_GPL(dpu_get_syncmode_min_prate); + +unsigned int dpu_get_singlemode_max_width(struct dpu_soc *dpu) +{ + if (dpu->devtype->has_pc) + return dpu->devtype->singlemode_max_width; + else + return UINT_MAX; +} +EXPORT_SYMBOL_GPL(dpu_get_singlemode_max_width); + +unsigned int dpu_get_master_stream_id(struct dpu_soc *dpu) +{ + if (dpu->devtype->has_pc) + return dpu->devtype->master_stream_id; + else + return UINT_MAX; +} +EXPORT_SYMBOL_GPL(dpu_get_master_stream_id); + +bool dpu_vproc_has_fetcheco_cap(u32 cap_mask) +{ + return !!(cap_mask & DPU_VPROC_CAP_FETCHECO); +} +EXPORT_SYMBOL_GPL(dpu_vproc_has_fetcheco_cap); + +bool dpu_vproc_has_hscale_cap(u32 cap_mask) +{ + return 
!!(cap_mask & DPU_VPROC_CAP_HSCALE); +} +EXPORT_SYMBOL_GPL(dpu_vproc_has_hscale_cap); + +bool dpu_vproc_has_vscale_cap(u32 cap_mask) +{ + return !!(cap_mask & DPU_VPROC_CAP_VSCALE); +} +EXPORT_SYMBOL_GPL(dpu_vproc_has_vscale_cap); + +u32 dpu_vproc_get_fetcheco_cap(u32 cap_mask) +{ + return cap_mask & DPU_VPROC_CAP_FETCHECO; +} +EXPORT_SYMBOL_GPL(dpu_vproc_get_fetcheco_cap); + +u32 dpu_vproc_get_hscale_cap(u32 cap_mask) +{ + return cap_mask & DPU_VPROC_CAP_HSCALE; +} +EXPORT_SYMBOL_GPL(dpu_vproc_get_hscale_cap); + +u32 dpu_vproc_get_vscale_cap(u32 cap_mask) +{ + return cap_mask & DPU_VPROC_CAP_VSCALE; +} +EXPORT_SYMBOL_GPL(dpu_vproc_get_vscale_cap); + +int dpu_format_horz_chroma_subsampling(u32 format) +{ + switch (format) { + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + return 2; + default: + return 1; + } +} + +int dpu_format_vert_chroma_subsampling(u32 format) +{ + switch (format) { + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + return 2; + default: + return 1; + } +} + +int dpu_format_num_planes(u32 format) +{ + switch (format) { + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + case DRM_FORMAT_NV24: + case DRM_FORMAT_NV42: + return 2; + default: + return 1; + } +} + +int dpu_format_plane_width(int width, u32 format, int plane) +{ + if (plane >= dpu_format_num_planes(format)) + return 0; + + if (plane == 0) + return width; + + return width / dpu_format_horz_chroma_subsampling(format); +} + +int dpu_format_plane_height(int height, u32 format, int plane) +{ + if (plane >= dpu_format_num_planes(format)) + return 0; + + if (plane == 0) + return height; + + return height / dpu_format_vert_chroma_subsampling(format); +} + +#define _DPU_UNITS_INIT(unit) \ +{ \ + const struct dpu_unit *us = devtype->unit##s; \ + int i; \ + \ + /* software check */ \ + if (WARN_ON(us->num > ARRAY_SIZE(unit##_ids))) \ + return -EINVAL; \ + \ 
+ for (i = 0; i < us->num; i++) \ + _dpu_##unit##_init(dpu, us->ids[i]); \ +} + +static int +_dpu_submodules_init(struct dpu_soc *dpu, struct platform_device *pdev) +{ + const struct dpu_devtype *devtype = dpu->devtype; + + _DPU_UNITS_INIT(cf); + _DPU_UNITS_INIT(dec); + _DPU_UNITS_INIT(ed); + _DPU_UNITS_INIT(fd); + _DPU_UNITS_INIT(fe); + _DPU_UNITS_INIT(fg); + _DPU_UNITS_INIT(fl); + _DPU_UNITS_INIT(fw); + _DPU_UNITS_INIT(hs); + _DPU_UNITS_INIT(lb); + _DPU_UNITS_INIT(tcon); + _DPU_UNITS_INIT(vs); + + return 0; +} + +#define DPU_UNIT_INIT(dpu, base, unit, name, id, pec_ofs, ofs) \ +{ \ + int ret; \ + ret = dpu_##unit##_init((dpu), (id), \ + (pec_ofs) ? (base) + (pec_ofs) : 0, \ + (base) + (ofs)); \ + if (ret) { \ + dev_err((dpu)->dev, "init %s%d failed with %d\n", \ + (name), (id), ret); \ + return ret; \ + } \ +} + +#define DPU_UNITS_INIT(unit) \ +{ \ + const struct dpu_unit *us = devtype->unit##s; \ + int i; \ + \ + /* software check */ \ + if (WARN_ON(us->num > ARRAY_SIZE(unit##_ids))) \ + return -EINVAL; \ + \ + for (i = 0; i < us->num; i++) \ + DPU_UNIT_INIT(dpu, dpu_base, unit, us->name, \ + us->ids[i], \ + us->pec_ofss ? 
us->pec_ofss[i] : 0, \ + us->ofss[i]); \ +} + +static int dpu_submodules_init(struct dpu_soc *dpu, + struct platform_device *pdev, unsigned long dpu_base) +{ + const struct dpu_devtype *devtype = dpu->devtype; + const struct dpu_unit *fds = devtype->fds; + const struct dpu_unit *fls = devtype->fls; + const struct dpu_unit *fws = devtype->fws; + const struct dpu_unit *tcons = devtype->tcons; + + DPU_UNITS_INIT(cf); + DPU_UNITS_INIT(dec); + DPU_UNITS_INIT(ed); + DPU_UNITS_INIT(fd); + DPU_UNITS_INIT(fe); + DPU_UNITS_INIT(fg); + DPU_UNITS_INIT(fl); + DPU_UNITS_INIT(fw); + DPU_UNITS_INIT(hs); + DPU_UNITS_INIT(lb); + DPU_UNITS_INIT(st); + DPU_UNITS_INIT(tcon); + DPU_UNITS_INIT(vs); + + /* get DPR channel for submodules */ + if (devtype->has_prefetch) { + struct dpu_fetchunit *fu; + struct dprc *dprc; + int i; + + for (i = 0; i < fds->num; i++) { + dprc = dprc_lookup_by_phandle(dpu->dev, + "fsl,dpr-channels", + fds->dprc_ids[i]); + if (!dprc) + return -EPROBE_DEFER; + + fu = dpu_fd_get(dpu, i); + fetchunit_get_dprc(fu, dprc); + dpu_fd_put(fu); + } + + for (i = 0; i < fls->num; i++) { + dprc = dprc_lookup_by_phandle(dpu->dev, + "fsl,dpr-channels", + fls->dprc_ids[i]); + if (!dprc) + return -EPROBE_DEFER; + + fu = dpu_fl_get(dpu, i); + fetchunit_get_dprc(fu, dprc); + dpu_fl_put(fu); + } + + for (i = 0; i < fws->num; i++) { + dprc = dprc_lookup_by_phandle(dpu->dev, + "fsl,dpr-channels", + fws->dprc_ids[i]); + if (!dprc) + return -EPROBE_DEFER; + + fu = dpu_fw_get(dpu, fw_ids[i]); + fetchunit_get_dprc(fu, dprc); + dpu_fw_put(fu); + } + } + + /* get pixel combiner */ + if (devtype->has_pc) { + struct dpu_tcon *tcon; + struct pc *pc = + pc_lookup_by_phandle(dpu->dev, "fsl,pixel-combiner"); + int i; + + if (!pc) + return -EPROBE_DEFER; + + for (i = 0; i < tcons->num; i++) { + tcon = dpu_tcon_get(dpu, i); + tcon_get_pc(tcon, pc); + dpu_tcon_put(tcon); + } + } + + return 0; +} + +#define DPU_UNITS_ADDR_DBG(unit) \ +{ \ + const struct dpu_unit *us = devtype->unit##s; \ + int i; \ + 
for (i = 0; i < us->num; i++) { \ + if (us->pec_ofss) { \ + dev_dbg(&pdev->dev, "%s%d: pixengcfg @ 0x%08lx,"\ + " unit @ 0x%08lx\n", us->name, \ + us->ids[i], \ + dpu_base + us->pec_ofss[i], \ + dpu_base + us->ofss[i]); \ + } else { \ + dev_dbg(&pdev->dev, \ + "%s%d: unit @ 0x%08lx\n", us->name, \ + us->ids[i], dpu_base + us->ofss[i]); \ + } \ + } \ +} + +enum dpu_irq_line { + DPU_IRQ_LINE_CM = 0, + DPU_IRQ_LINE_STREAM0A = 1, + DPU_IRQ_LINE_STREAM1A = 3, + DPU_IRQ_LINE_RESERVED0 = 5, + DPU_IRQ_LINE_RESERVED1 = 6, + DPU_IRQ_LINE_BLIT = 7, +}; + +static inline unsigned int dpu_get_max_intsteer_num(enum dpu_irq_line irq_line) +{ + return 64 * (++irq_line) - 1; +} + +static inline unsigned int dpu_get_min_intsteer_num(enum dpu_irq_line irq_line) +{ + return 64 * irq_line; +} + +static void +dpu_inner_irq_handle(struct irq_desc *desc, enum dpu_irq_line irq_line) +{ + struct dpu_soc *dpu = irq_desc_get_handler_data(desc); + const struct dpu_devtype *devtype = dpu->devtype; + const struct cm_reg_ofs *ofs = devtype->cm_reg_ofs; + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned int i, virq, min_intsteer_num, max_intsteer_num; + u32 status; + + chained_irq_enter(chip, desc); + + min_intsteer_num = dpu_get_min_intsteer_num(irq_line); + max_intsteer_num = dpu_get_max_intsteer_num(irq_line); + + for (i = 0; i < devtype->intsteer_map_size; i++) { + if (devtype->intsteer_map[i] >= min_intsteer_num && + devtype->intsteer_map[i] <= max_intsteer_num) { + status = dpu_cm_read(dpu, + USERINTERRUPTSTATUS(ofs, i / 32)); + status &= dpu_cm_read(dpu, + USERINTERRUPTENABLE(ofs, i / 32)); + + if (status & BIT(i % 32)) { + virq = irq_linear_revmap(dpu->domain, i); + if (virq) { + generic_handle_irq(virq); + } + } + } + } + + chained_irq_exit(chip, desc); +} + +#define DPU_INNER_IRQ_HANDLER_DEFINE(name1, name2) \ +static void dpu_##name1##_irq_handler(struct irq_desc *desc) \ +{ \ + dpu_inner_irq_handle(desc, DPU_IRQ_LINE_##name2); \ +} + +DPU_INNER_IRQ_HANDLER_DEFINE(cm, CM) 
+DPU_INNER_IRQ_HANDLER_DEFINE(stream0a, STREAM0A) +DPU_INNER_IRQ_HANDLER_DEFINE(stream1a, STREAM1A) +DPU_INNER_IRQ_HANDLER_DEFINE(reserved0, RESERVED0) +DPU_INNER_IRQ_HANDLER_DEFINE(reserved1, RESERVED1) +DPU_INNER_IRQ_HANDLER_DEFINE(blit, BLIT) + +int dpu_map_inner_irq(struct dpu_soc *dpu, int irq) +{ + const unsigned int *sw2hw_irq_map = dpu->devtype->sw2hw_irq_map; + int virq, mapped_irq; + + mapped_irq = sw2hw_irq_map ? sw2hw_irq_map[irq] : irq; + if (WARN_ON(mapped_irq == NA)) + return -EINVAL; + + virq = irq_linear_revmap(dpu->domain, mapped_irq); + if (!virq) + virq = irq_create_mapping(dpu->domain, mapped_irq); + + return virq; +} +EXPORT_SYMBOL_GPL(dpu_map_inner_irq); + +static int platform_remove_devices_fn(struct device *dev, void *unused) +{ + struct platform_device *pdev = to_platform_device(dev); + + platform_device_unregister(pdev); + + return 0; +} + +static void platform_device_unregister_children(struct platform_device *pdev) +{ + device_for_each_child(&pdev->dev, NULL, platform_remove_devices_fn); +} + +struct dpu_platform_reg { + struct dpu_client_platformdata pdata; + const char *name; +}; + +static struct dpu_platform_reg client_reg[] = { + { + /* placeholder */ + .pdata = { }, + .name = "imx-dpu-csi", + }, { + /* placeholder */ + .pdata = { }, + .name = "imx-dpu-csi", + }, { + .pdata = { + .stream_id = 0, + }, + .name = "imx-dpu-crtc", + }, { + .pdata = { + .stream_id = 1, + }, + .name = "imx-dpu-crtc", + }, { + .pdata = { }, + .name = "imx-drm-dpu-bliteng", + }, +}; + +static DEFINE_MUTEX(dpu_client_id_mutex); +static int dpu_client_id; + +static int dpu_get_plane_resource(struct dpu_soc *dpu, + struct dpu_plane_res *res) +{ + const struct dpu_unit *fds = dpu->devtype->fds; + const struct dpu_unit *fls = dpu->devtype->fls; + const struct dpu_unit *fws = dpu->devtype->fws; + const struct dpu_unit *lbs = dpu->devtype->lbs; + struct dpu_plane_grp *grp = plane_res_to_grp(res); + int i; + + for (i = 0; i < ARRAY_SIZE(res->cf); i++) { + res->cf[i] 
= dpu_cf_get(dpu, i); + if (IS_ERR(res->cf[i])) + return PTR_ERR(res->cf[i]); + } + for (i = 0; i < ARRAY_SIZE(res->ed); i++) { + res->ed[i] = dpu_ed_get(dpu, i); + if (IS_ERR(res->ed[i])) + return PTR_ERR(res->ed[i]); + } + for (i = 0; i < fds->num; i++) { + res->fd[i] = dpu_fd_get(dpu, i); + if (IS_ERR(res->fd[i])) + return PTR_ERR(res->fd[i]); + } + for (i = 0; i < ARRAY_SIZE(res->fe); i++) { + res->fe[i] = dpu_fe_get(dpu, i); + if (IS_ERR(res->fe[i])) + return PTR_ERR(res->fe[i]); + grp->hw_plane_fetcheco_num = ARRAY_SIZE(res->fe); + } + for (i = 0; i < fls->num; i++) { + res->fl[i] = dpu_fl_get(dpu, i); + if (IS_ERR(res->fl[i])) + return PTR_ERR(res->fl[i]); + } + for (i = 0; i < fws->num; i++) { + res->fw[i] = dpu_fw_get(dpu, fw_ids[i]); + if (IS_ERR(res->fw[i])) + return PTR_ERR(res->fw[i]); + } + /* HScaler could be shared with capture. */ + if (display_plane_video_proc) { + for (i = 0; i < ARRAY_SIZE(res->hs); i++) { + res->hs[i] = dpu_hs_get(dpu, hs_ids[i]); + if (IS_ERR(res->hs[i])) + return PTR_ERR(res->hs[i]); + } + grp->hw_plane_hscaler_num = ARRAY_SIZE(res->hs); + } + for (i = 0; i < lbs->num; i++) { + res->lb[i] = dpu_lb_get(dpu, i); + if (IS_ERR(res->lb[i])) + return PTR_ERR(res->lb[i]); + } + /* VScaler could be shared with capture. 
*/ + if (display_plane_video_proc) { + for (i = 0; i < ARRAY_SIZE(res->vs); i++) { + res->vs[i] = dpu_vs_get(dpu, vs_ids[i]); + if (IS_ERR(res->vs[i])) + return PTR_ERR(res->vs[i]); + } + grp->hw_plane_vscaler_num = ARRAY_SIZE(res->vs); + } + + grp->hw_plane_num = fds->num + fls->num + fws->num; + + return 0; +} + +static void dpu_put_plane_resource(struct dpu_plane_res *res) +{ + struct dpu_plane_grp *grp = plane_res_to_grp(res); + int i; + + for (i = 0; i < ARRAY_SIZE(res->cf); i++) { + if (!IS_ERR_OR_NULL(res->cf[i])) + dpu_cf_put(res->cf[i]); + } + for (i = 0; i < ARRAY_SIZE(res->ed); i++) { + if (!IS_ERR_OR_NULL(res->ed[i])) + dpu_ed_put(res->ed[i]); + } + for (i = 0; i < ARRAY_SIZE(res->fd); i++) { + if (!IS_ERR_OR_NULL(res->fd[i])) + dpu_fd_put(res->fd[i]); + } + for (i = 0; i < ARRAY_SIZE(res->fe); i++) { + if (!IS_ERR_OR_NULL(res->fe[i])) + dpu_fe_put(res->fe[i]); + } + for (i = 0; i < ARRAY_SIZE(res->fl); i++) { + if (!IS_ERR_OR_NULL(res->fl[i])) + dpu_fl_put(res->fl[i]); + } + for (i = 0; i < ARRAY_SIZE(res->fw); i++) { + if (!IS_ERR_OR_NULL(res->fw[i])) + dpu_fw_put(res->fw[i]); + } + for (i = 0; i < ARRAY_SIZE(res->hs); i++) { + if (!IS_ERR_OR_NULL(res->hs[i])) + dpu_hs_put(res->hs[i]); + } + for (i = 0; i < ARRAY_SIZE(res->lb); i++) { + if (!IS_ERR_OR_NULL(res->lb[i])) + dpu_lb_put(res->lb[i]); + } + for (i = 0; i < ARRAY_SIZE(res->vs); i++) { + if (!IS_ERR_OR_NULL(res->vs[i])) + dpu_vs_put(res->vs[i]); + } + + grp->hw_plane_num = 0; +} + +static int dpu_add_client_devices(struct dpu_soc *dpu) +{ + const struct dpu_devtype *devtype = dpu->devtype; + struct device *dev = dpu->dev; + struct dpu_platform_reg *reg; + struct dpu_plane_grp *plane_grp; + struct dpu_store *st9 = NULL; + size_t client_num, reg_size; + int i, id, ret; + + client_num = ARRAY_SIZE(client_reg); + if (!devtype->has_capture) + client_num -= 2; + + reg = devm_kcalloc(dev, client_num, sizeof(*reg), GFP_KERNEL); + if (!reg) + return -ENODEV; + + plane_grp = devm_kzalloc(dev, 
sizeof(*plane_grp), GFP_KERNEL); + if (!plane_grp) + return -ENODEV; + + mutex_init(&plane_grp->mutex); + + mutex_lock(&dpu_client_id_mutex); + id = dpu_client_id; + dpu_client_id += client_num; + mutex_unlock(&dpu_client_id_mutex); + + reg_size = client_num * sizeof(struct dpu_platform_reg); + if (devtype->has_capture) + memcpy(reg, client_reg, reg_size); + else + memcpy(reg, &client_reg[2], reg_size); + + plane_grp->src_na_mask = devtype->plane_src_na_mask; + plane_grp->id = id / client_num; + plane_grp->has_vproc = display_plane_video_proc; + + ret = dpu_get_plane_resource(dpu, &plane_grp->res); + if (ret) + goto err_get_plane_res; + + /* + * Store9 is shared bewteen display engine(for sync mode + * fixup) and blit engine. + */ + if (devtype->has_syncmode_fixup) { + st9 = dpu_st_get(dpu, 9); + if (IS_ERR(st9)) { + ret = PTR_ERR(st9); + goto err_get_plane_res; + } + } + + for (i = 0; i < client_num; i++) { + struct platform_device *pdev; + struct device_node *of_node = NULL; + bool is_disp, is_bliteng; + + if (devtype->has_capture) { + is_bliteng = (i == 4) ? true : false; + is_disp = (!is_bliteng) && ((i / 2) ? true : false); + } else { + is_bliteng = (i == 2) ? true : false; + is_disp = !is_bliteng; + } + + if (is_bliteng) { + /* As bliteng has no of_node, so to use dpu's. */ + of_node = dev->of_node; + } else { + /* + * Associate subdevice with the + * corresponding port node. + */ + of_node = of_graph_get_port_by_id(dev->of_node, i); + if (!of_node) { + dev_info(dev, "no port@%d node in %s, not using %s%d\n", + i, dev->of_node->full_name, + is_disp ? 
"DISP" : "CSI", i % 2); + continue; + } + } + + if (is_disp) { + reg[i].pdata.plane_grp = plane_grp; + reg[i].pdata.di_grp_id = plane_grp->id; + reg[i].pdata.st9 = st9; + } + + pdev = platform_device_alloc(reg[i].name, id++); + if (!pdev) { + ret = -ENOMEM; + goto err_register; + } + + pdev->dev.parent = dev; + + reg[i].pdata.of_node = of_node; + ret = platform_device_add_data(pdev, ®[i].pdata, + sizeof(reg[i].pdata)); + if (!ret) + ret = platform_device_add(pdev); + if (ret) { + platform_device_put(pdev); + goto err_register; + } + } + + return 0; + +err_register: + platform_device_unregister_children(to_platform_device(dev)); + if (devtype->has_syncmode_fixup) + dpu_st_put(st9); +err_get_plane_res: + dpu_put_plane_resource(&plane_grp->res); + + return ret; +} + +#define IRQSTEER_CHANnCTL 0x0 +#define IRQSTEER_CHANnCTL_CH(n) BIT(n) +#define IRQSTEER_CHANnMASK(n) ((n) + 4) +#define LINE_TO_MASK_OFFSET(n) ((15 - ((n) / 32)) * 4) +#define LINE_TO_MASK_SHIFT(n) ((n) % 32) + +static void dpu_inner_irq_gc_mask_set_bit(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct irq_chip_type *ct = irq_data_get_chip_type(d); + struct dpu_soc *dpu = gc->domain->host_data; + unsigned long flags; + u32 mask = d->mask; + + irq_gc_lock(gc); + spin_lock_irqsave(&dpu->intsteer_lock, flags); + if (++dpu->intsteer_usecount == 1) + /* assuming fast I/O regmap */ + regmap_write(dpu->intsteer_regmap, IRQSTEER_CHANnCTL, + IRQSTEER_CHANnCTL_CH(0)); + spin_unlock_irqrestore(&dpu->intsteer_lock, flags); + *ct->mask_cache |= mask; + irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); + irq_gc_unlock(gc); +} + +static void dpu_inner_irq_gc_mask_clr_bit(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct irq_chip_type *ct = irq_data_get_chip_type(d); + struct dpu_soc *dpu = gc->domain->host_data; + unsigned long flags; + u32 mask = d->mask; + + irq_gc_lock(gc); + spin_lock_irqsave(&dpu->intsteer_lock, flags); + if 
(!--dpu->intsteer_usecount) { + WARN(dpu->intsteer_usecount < 0, + "intsteer usecount %d is less than zero", + dpu->intsteer_usecount); + regmap_write(dpu->intsteer_regmap, IRQSTEER_CHANnCTL, 0); + } + spin_unlock_irqrestore(&dpu->intsteer_lock, flags); + *ct->mask_cache &= ~mask; + irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); + irq_gc_unlock(gc); +} + +static void +dpu_inner_intsteer_enable_line(struct dpu_soc *dpu, unsigned int line) +{ + unsigned int offset = LINE_TO_MASK_OFFSET(line); + unsigned int shift = LINE_TO_MASK_SHIFT(line); + + regmap_update_bits(dpu->intsteer_regmap, IRQSTEER_CHANnMASK(offset), + BIT(shift), BIT(shift)); +} + +static void dpu_inner_intsteer_enable_lines(struct dpu_soc *dpu) +{ + const struct dpu_devtype *devtype = dpu->devtype; + int i; + + for (i = 0; i < devtype->intsteer_map_size; i++) { + if (devtype->intsteer_map[i] == NA) + continue; + + dpu_inner_intsteer_enable_line(dpu, devtype->intsteer_map[i]); + } +} + +static int dpu_inner_irq_init(struct dpu_soc *dpu) +{ + const struct dpu_devtype *devtype = dpu->devtype; + const struct cm_reg_ofs *ofs = devtype->cm_reg_ofs; + struct irq_chip_generic *gc; + struct irq_chip_type *ct; + int ret, i; + + dpu_inner_intsteer_enable_lines(dpu); + + dpu->domain = irq_domain_add_linear(dpu->dev->of_node, + devtype->intsteer_map_size, + &irq_generic_chip_ops, dpu); + if (!dpu->domain) { + dev_err(dpu->dev, "failed to add irq domain\n"); + return -ENODEV; + } + + ret = irq_alloc_domain_generic_chips(dpu->domain, 32, 1, "DPU", + handle_level_irq, 0, 0, 0); + if (ret < 0) { + dev_err(dpu->dev, "failed to alloc generic irq chips\n"); + irq_domain_remove(dpu->domain); + return ret; + } + + for (i = 0; i < devtype->intsteer_map_size; i += 32) { + /* Mask and clear all interrupts */ + dpu_cm_write(dpu, 0, + USERINTERRUPTENABLE(ofs, i / 32)); + dpu_cm_write(dpu, ~devtype->unused_irq[i / 32], + USERINTERRUPTCLEAR(ofs, i / 32)); + dpu_cm_write(dpu, 0, + INTERRUPTENABLE(ofs, i / 32)); + 
dpu_cm_write(dpu, ~devtype->unused_irq[i / 32], + INTERRUPTCLEAR(ofs, i / 32)); + + /* Set all interrupts to user mode */ + dpu_cm_write(dpu, ~devtype->unused_irq[i / 32], + USERINTERRUPTMASK(ofs, i / 32)); + + gc = irq_get_domain_generic_chip(dpu->domain, i); + gc->reg_base = dpu->cm_reg; + gc->unused = devtype->unused_irq[i / 32]; + ct = gc->chip_types; + ct->chip.irq_ack = irq_gc_ack_set_bit; + ct->chip.irq_mask = dpu_inner_irq_gc_mask_clr_bit; + ct->chip.irq_unmask = dpu_inner_irq_gc_mask_set_bit; + ct->regs.ack = USERINTERRUPTCLEAR(ofs, i / 32); + ct->regs.mask = USERINTERRUPTENABLE(ofs, i / 32); + } + +#define DPU_INNER_IRQ_SET_CHAINED_HANDLER_AND_DATA1(name) \ +irq_set_chained_handler_and_data(dpu->irq_##name, dpu_##name##_irq_handler, dpu) + + DPU_INNER_IRQ_SET_CHAINED_HANDLER_AND_DATA1(cm); + DPU_INNER_IRQ_SET_CHAINED_HANDLER_AND_DATA1(stream0a); + DPU_INNER_IRQ_SET_CHAINED_HANDLER_AND_DATA1(stream1a); + DPU_INNER_IRQ_SET_CHAINED_HANDLER_AND_DATA1(reserved0); + DPU_INNER_IRQ_SET_CHAINED_HANDLER_AND_DATA1(reserved1); + DPU_INNER_IRQ_SET_CHAINED_HANDLER_AND_DATA1(blit); + + return 0; +} + +static void dpu_inner_irq_exit(struct dpu_soc *dpu) +{ + const struct dpu_devtype *devtype = dpu->devtype; + unsigned int i, irq; + +#define DPU_INNER_IRQ_SET_CHAINED_HANDLER_AND_DATA2(name) \ +irq_set_chained_handler_and_data(dpu->irq_##name, NULL, NULL) + + DPU_INNER_IRQ_SET_CHAINED_HANDLER_AND_DATA2(cm); + DPU_INNER_IRQ_SET_CHAINED_HANDLER_AND_DATA2(stream0a); + DPU_INNER_IRQ_SET_CHAINED_HANDLER_AND_DATA2(stream1a); + DPU_INNER_IRQ_SET_CHAINED_HANDLER_AND_DATA2(reserved0); + DPU_INNER_IRQ_SET_CHAINED_HANDLER_AND_DATA2(reserved1); + DPU_INNER_IRQ_SET_CHAINED_HANDLER_AND_DATA2(blit); + + for (i = 0; i < devtype->intsteer_map_size; i++) { + irq = irq_linear_revmap(dpu->domain, i); + if (irq) + irq_dispose_mapping(irq); + } + + irq_domain_remove(dpu->domain); +} + +static irqreturn_t dpu_dpr0_irq_handler(int irq, void *desc) +{ + struct dpu_soc *dpu = desc; + const struct 
dpu_unit *fls = dpu->devtype->fls; + struct dpu_fetchunit *fu; + int i; + + for (i = 0; i < fls->num; i++) { + fu = dpu->fl_priv[i]; + dprc_irq_handle(fu->dprc); + } + + return IRQ_HANDLED; +} + +static irqreturn_t dpu_dpr1_irq_handler(int irq, void *desc) +{ + struct dpu_soc *dpu = desc; + const struct dpu_unit *fds = dpu->devtype->fds; + const struct dpu_unit *fws = dpu->devtype->fws; + struct dpu_fetchunit *fu; + int i; + + for (i = 0; i < fds->num; i++) { + fu = dpu->fd_priv[i]; + dprc_irq_handle(fu->dprc); + } + + for (i = 0; i < fws->num; i++) { + fu = dpu->fw_priv[i]; + dprc_irq_handle(fu->dprc); + } + + return IRQ_HANDLED; +} + +static void dpu_debug_ip_identity(struct dpu_soc *dpu) +{ + struct device *dev = dpu->dev; + const struct cm_reg_ofs *ofs = dpu->devtype->cm_reg_ofs; + u32 reg; + int id = 0; + + reg = dpu_cm_read(dpu, IPIDENTIFIER(ofs)); + + dev_dbg(dev, "%d) Maturatiy level:\n", ++id); + switch (reg & DESIGNMATURITYLEVEL_MASK) { + case DESIGNMATURITYLEVEL__PREFS: + dev_dbg(dev, "\tPre feasibility study.\n"); + break; + case DESIGNMATURITYLEVEL__FS: + dev_dbg(dev, "\tFeasibility study.\n"); + break; + case DESIGNMATURITYLEVEL__R0: + dev_dbg(dev, "\tFunctionality complete.\n"); + break; + case DESIGNMATURITYLEVEL__R1: + dev_dbg(dev, "\tVerification complete.\n"); + break; + default: + dev_dbg(dev, "\tUnknown.\n"); + break; + } + + dev_dbg(dev, "%d) IP feature set:\n", ++id); + switch (reg & IPFEATURESET_MASK) { + case IPFEATURESET__E: + dev_dbg(dev, "\tMinimal functionality (Eco).\n"); + break; + case IPFEATURESET__L: + dev_dbg(dev, "\tReduced functionality (Light).\n"); + break; + case IPFEATURESET__P: + dev_dbg(dev, "\tAdvanced functionality (Plus).\n"); + break; + case IPFEATURESET__X: + dev_dbg(dev, "\tExtensive functionality (eXtensive).\n"); + break; + default: + dev_dbg(dev, "\tUnknown.\n"); + break; + } + + dev_dbg(dev, "%d) IP application:\n", ++id); + switch (reg & IPAPPLICATION_MASK) { + case IPAPPLICATION__B: + dev_dbg(dev, "\tBlit 
engine only.\n"); + break; + case IPAPPLICATION__D: + dev_dbg(dev, "\tBlit engine and display controller.\n"); + break; + case IPAPPLICATION__V: + dev_dbg(dev, "\tDisplay controller only " + "(with direct capture).\n"); + break; + case IPAPPLICATION__G: + dev_dbg(dev, "\tBlit engine, display controller " + "(with direct capture),\n" + "\tcapture controller (buffered capture) " + "and drawing engine.\n"); + break; + case IPAPPLICATION__C: + dev_dbg(dev, "\tDisplay controller only.\n"); + break; + default: + dev_dbg(dev, "\tUnknown.\n"); + break; + } + + dev_dbg(dev, "%d) IP configuration:\n", ++id); + switch (reg & IPCONFIGURATION_MASK) { + case IPCONFIGURATION__M: + dev_dbg(dev, "\tGraphics core only (Module).\n"); + break; + case IPCONFIGURATION__S: + dev_dbg(dev, "\tSubsystem including a graphics core " + "(System).\n"); + break; + default: + dev_dbg(dev, "\tUnknown.\n"); + break; + } + + dev_dbg(dev, "%d) IP family:\n", ++id); + switch (reg & IPFAMILY_MASK) { + case IPFAMILY__IMXDPU2010: + dev_dbg(dev, "\tBuilding block generation 2010.\n"); + break; + case IPFAMILY__IMXDPU2012: + dev_dbg(dev, "\tBuilding block generation 2012.\n"); + break; + case IPFAMILY__IMXDPU2013: + dev_dbg(dev, "\tBuilding block generation 2013.\n"); + break; + default: + dev_dbg(dev, "\tUnknown.\n"); + break; + } +} + +/* FIXME: initialize pixel link in a proper manner */ +static void dpu_pixel_link_init(int id) +{ + sc_err_t sciErr; + sc_ipc_t ipcHndl = 0; + u32 mu_id; + + sciErr = sc_ipc_getMuID(&mu_id); + if (sciErr != SC_ERR_NONE) { + pr_err("Cannot obtain MU ID\n"); + return; + } + + sciErr = sc_ipc_open(&ipcHndl, mu_id); + if (sciErr != SC_ERR_NONE) { + pr_err("sc_ipc_open failed! (sciError = %d)\n", sciErr); + return; + } + + if (id == 0) { + /* SC_C_KACHUNK_CNT is for blit */ + sciErr = sc_misc_set_control(ipcHndl, SC_R_DC_0, SC_C_KACHUNK_CNT, 32); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_0:SC_C_KACHUNK_CNT sc_misc_set_control failed! 
(sciError = %d)\n", sciErr); + + sciErr = sc_misc_set_control(ipcHndl, SC_R_DC_0, SC_C_PXL_LINK_MST1_ADDR, 0); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_0:SC_C_PXL_LINK_MST1_ADDR sc_misc_set_control failed! (sciError = %d)\n", sciErr); + + sciErr = sc_misc_set_control(ipcHndl, SC_R_DC_0, SC_C_PXL_LINK_MST1_ENB, 0); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_0:SC_C_PXL_LINK_MST1_ENB sc_misc_set_control failed! (sciError = %d)\n", sciErr); + + sciErr = sc_misc_set_control(ipcHndl, SC_R_DC_0, SC_C_PXL_LINK_MST1_VLD, 0); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_0:SC_C_PXL_LINK_MST1_VLD sc_misc_set_control failed! (sciError = %d)\n", sciErr); + + sciErr = sc_misc_set_control(ipcHndl, SC_R_DC_0, SC_C_PXL_LINK_MST2_ADDR, 0); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_0:SC_C_PXL_LINK_MST2_ADDR sc_misc_set_control failed! (sciError = %d)\n", sciErr); + + sciErr = sc_misc_set_control(ipcHndl, SC_R_DC_0, SC_C_PXL_LINK_MST2_ENB, 0); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_0:SC_C_PXL_LINK_MST2_ENB sc_misc_set_control failed! (sciError = %d)\n", sciErr); + + sciErr = sc_misc_set_control(ipcHndl, SC_R_DC_0, SC_C_PXL_LINK_MST2_VLD, 0); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_0:SC_C_PXL_LINK_MST2_VLD sc_misc_set_control failed! (sciError = %d)\n", sciErr); + + sciErr = sc_misc_set_control(ipcHndl, SC_R_DC_0, SC_C_SYNC_CTRL0, 0); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_0:SC_C_SYNC_CTRL0 sc_misc_set_control failed! (sciError = %d)\n", sciErr); + + sciErr = sc_misc_set_control(ipcHndl, SC_R_DC_0, SC_C_SYNC_CTRL1, 0); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_0:SC_C_SYNC_CTRL1 sc_misc_set_control failed! (sciError = %d)\n", sciErr); + } else if (id == 1) { + /* SC_C_KACHUNK_CNT is for blit */ + sciErr = sc_misc_set_control(ipcHndl, SC_R_DC_1, SC_C_KACHUNK_CNT, 32); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_1:SC_C_KACHUNK_CNT sc_misc_set_control failed! 
(sciError = %d)\n", sciErr); + sciErr = sc_misc_set_control(ipcHndl, SC_R_DC_1, SC_C_PXL_LINK_MST1_ADDR, 0); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_1:SC_C_PXL_LINK_MST1_ADDR sc_misc_set_control failed! (sciError = %d)\n", sciErr); + + sciErr = sc_misc_set_control(ipcHndl, SC_R_DC_1, SC_C_PXL_LINK_MST1_ENB, 0); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_1:SC_C_PXL_LINK_MST1_ENB sc_misc_set_control failed! (sciError = %d)\n", sciErr); + + sciErr = sc_misc_set_control(ipcHndl, SC_R_DC_1, SC_C_PXL_LINK_MST1_VLD, 0); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_1:SC_C_PXL_LINK_MST1_VLD sc_misc_set_control failed! (sciError = %d)\n", sciErr); + + sciErr = sc_misc_set_control(ipcHndl, SC_R_DC_1, SC_C_PXL_LINK_MST2_ADDR, 0); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_1:SC_C_PXL_LINK_MST2_ADDR sc_misc_set_control failed! (sciError = %d)\n", sciErr); + + sciErr = sc_misc_set_control(ipcHndl, SC_R_DC_1, SC_C_PXL_LINK_MST2_ENB, 0); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_1:SC_C_PXL_LINK_MST2_ENB sc_misc_set_control failed! (sciError = %d)\n", sciErr); + + sciErr = sc_misc_set_control(ipcHndl, SC_R_DC_1, SC_C_PXL_LINK_MST2_VLD, 0); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_1:SC_C_PXL_LINK_MST2_VLD sc_misc_set_control failed! (sciError = %d)\n", sciErr); + + sciErr = sc_misc_set_control(ipcHndl, SC_R_DC_1, SC_C_SYNC_CTRL0, 0); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_1:SC_C_SYNC_CTRL0 sc_misc_set_control failed! (sciError = %d)\n", sciErr); + + sciErr = sc_misc_set_control(ipcHndl, SC_R_DC_1, SC_C_SYNC_CTRL1, 0); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_1:SC_C_SYNC_CTRL1 sc_misc_set_control failed! 
(sciError = %d)\n", sciErr); + } + + sc_ipc_close(mu_id); +} + +static int dpu_probe(struct platform_device *pdev) +{ + const struct of_device_id *of_id = + of_match_device(dpu_dt_ids, &pdev->dev); + struct device_node *np = pdev->dev.of_node; + struct dpu_soc *dpu; + struct resource *res; + unsigned long dpu_base; + const struct dpu_devtype *devtype; + int ret; + + devtype = of_id->data; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + dpu_base = res->start; + + dpu = devm_kzalloc(&pdev->dev, sizeof(*dpu), GFP_KERNEL); + if (!dpu) + return -ENODEV; + + dpu->dev = &pdev->dev; + dpu->devtype = devtype; + dpu->id = of_alias_get_id(np, "dpu"); + + /* inner irqs */ + dpu->irq_cm = platform_get_irq(pdev, 0); + dpu->irq_stream0a = platform_get_irq(pdev, 1); + dpu->irq_stream1a = platform_get_irq(pdev, 3); + dpu->irq_reserved0 = platform_get_irq(pdev, 5); + dpu->irq_reserved1 = platform_get_irq(pdev, 6); + dpu->irq_blit = platform_get_irq(pdev, 7); + + dev_dbg(dpu->dev, "irq_cm: %d\n", dpu->irq_cm); + dev_dbg(dpu->dev, "irq_stream0a: %d, irq_stream1a: %d\n", + dpu->irq_stream0a, dpu->irq_stream1a); + dev_dbg(dpu->dev, "irq_reserved0: %d, irq_reserved1: %d\n", + dpu->irq_reserved0, dpu->irq_reserved1); + dev_dbg(dpu->dev, "irq_blit: %d\n", dpu->irq_blit); + + if (dpu->irq_cm < 0 || + dpu->irq_stream0a < 0 || dpu->irq_stream1a < 0 || + dpu->irq_reserved0 < 0 || dpu->irq_reserved1 < 0 || + dpu->irq_blit < 0) + return -ENODEV; + + dpu->intsteer_regmap = syscon_regmap_lookup_by_phandle(np, "intsteer"); + if (IS_ERR(dpu->intsteer_regmap)) { + dev_err(dpu->dev, "failed to get intsteer regmap\n"); + return PTR_ERR(dpu->intsteer_regmap); + } + + /* DPR irqs */ + if (dpu->devtype->has_prefetch) { + dpu->irq_dpr0 = platform_get_irq(pdev, 8); + dpu->irq_dpr1 = platform_get_irq(pdev, 9); + + dev_dbg(dpu->dev, "irq_dpr0: %d\n", dpu->irq_dpr0); + dev_dbg(dpu->dev, "irq_dpr1: %d\n", dpu->irq_dpr1); + + if (dpu->irq_dpr0 < 0 || dpu->irq_dpr1 < 0) 
+ return -ENODEV; + + ret = devm_request_irq(dpu->dev, dpu->irq_dpr0, + dpu_dpr0_irq_handler, 0, pdev->name, dpu); + if (ret) { + dev_err(dpu->dev, "request dpr0 interrupt failed\n"); + return ret; + } + + ret = devm_request_irq(dpu->dev, dpu->irq_dpr1, + dpu_dpr1_irq_handler, 0, pdev->name, dpu); + if (ret) { + dev_err(dpu->dev, "request dpr1 interrupt failed\n"); + return ret; + } + } + + spin_lock_init(&dpu->lock); + spin_lock_init(&dpu->intsteer_lock); + + dev_dbg(dpu->dev, "Common: 0x%08lx\n", dpu_base + devtype->cm_ofs); + DPU_UNITS_ADDR_DBG(cf); + DPU_UNITS_ADDR_DBG(dec); + DPU_UNITS_ADDR_DBG(ed); + DPU_UNITS_ADDR_DBG(fd); + DPU_UNITS_ADDR_DBG(fe); + DPU_UNITS_ADDR_DBG(fg); + DPU_UNITS_ADDR_DBG(fl); + DPU_UNITS_ADDR_DBG(fw); + DPU_UNITS_ADDR_DBG(hs); + DPU_UNITS_ADDR_DBG(lb); + DPU_UNITS_ADDR_DBG(st); + DPU_UNITS_ADDR_DBG(tcon); + DPU_UNITS_ADDR_DBG(vs); + + dpu->cm_reg = devm_ioremap(dpu->dev, dpu_base + devtype->cm_ofs, SZ_1K); + if (!dpu->cm_reg) + return -ENOMEM; + + platform_set_drvdata(pdev, dpu); + + ret = dpu_inner_irq_init(dpu); + if (ret) + goto failed_inner_irq; + + ret = dpu_submodules_init(dpu, pdev, dpu_base); + if (ret) + goto failed_submodules_init; + + ret = dpu_add_client_devices(dpu); + if (ret) { + dev_err(dpu->dev, "adding client devices failed with %d\n", + ret); + goto failed_add_clients; + } + + dpu_debug_ip_identity(dpu); + + if (devtype->pixel_link_quirks) + dpu_pixel_link_init(dpu->id); + + dev_info(dpu->dev, "driver probed\n"); + + return 0; + +failed_add_clients: +failed_submodules_init: + dpu_inner_irq_exit(dpu); +failed_inner_irq: + return ret; +} + +static int dpu_remove(struct platform_device *pdev) +{ + struct dpu_soc *dpu = platform_get_drvdata(pdev); + + platform_device_unregister_children(pdev); + dpu_inner_irq_exit(dpu); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int dpu_suspend(struct device *dev) +{ + /* + * The dpu core driver currently depends on the client drivers + * to do suspend operations to leave dpu a 
cleaned up state + * machine status before the system enters sleep mode. + */ + return 0; +} + +static int dpu_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct dpu_soc *dpu = platform_get_drvdata(pdev); + + dpu_inner_intsteer_enable_lines(dpu); + + if (dpu->devtype->pixel_link_quirks) + dpu_pixel_link_init(dpu->id); + + _dpu_submodules_init(dpu, pdev); + + return 0; +} +#endif + +static const struct dev_pm_ops dpu_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(dpu_suspend, dpu_resume) +}; + +static struct platform_driver dpu_driver = { + .driver = { + .pm = &dpu_pm_ops, + .name = "dpu-core", + .of_match_table = dpu_dt_ids, + }, + .probe = dpu_probe, + .remove = dpu_remove, +}; + +module_platform_driver(dpu_driver); + +MODULE_DESCRIPTION("i.MX DPU driver"); +MODULE_AUTHOR("Freescale Semiconductor, Inc."); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/imx/dpu/dpu-constframe.c b/drivers/gpu/imx/dpu/dpu-constframe.c new file mode 100644 index 0000000000000000000000000000000000000000..9823858149bb8f28febdc6c48cbee3227713d139 --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-constframe.c @@ -0,0 +1,283 @@ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <video/dpu.h> +#include "dpu-prv.h" + +static unsigned int safety_stream_cf_color = 0x0; +module_param(safety_stream_cf_color, uint, 0444); +MODULE_PARM_DESC(safety_stream_cf_color, +"Safety stream constframe color in hex(0xRRGGBBAA) [default=0x00000000]"); + +#define FRAMEDIMENSIONS 0xC +#define WIDTH(w) (((w) - 1) & 0x3FFF) +#define HEIGHT(h) ((((h) - 1) & 0x3FFF) << 16) +#define CONSTANTCOLOR 0x10 +#define RED(r) (((r) & 0xFF) << 24) +#define GREEN(g) (((g) & 0xFF) << 16) +#define BLUE(b) (((b) & 0xFF) << 8) +#define ALPHA(a) ((a) & 0xFF) +#define CONTROLTRIGGER 0x14 +#define START 0x18 +#define STATUS 0x1C + +static const shadow_load_req_t cf_shdlreqs[] = { + SHLDREQID_CONSTFRAME0, SHLDREQID_CONSTFRAME1, + SHLDREQID_CONSTFRAME4, SHLDREQID_CONSTFRAME5, +}; + +struct dpu_constframe { + void __iomem *pec_base; + void __iomem *base; + struct mutex mutex; + int id; + bool inuse; + struct dpu_soc *dpu; + shadow_load_req_t shdlreq; +}; + +static inline u32 dpu_cf_read(struct dpu_constframe *cf, unsigned int offset) +{ + return readl(cf->base + offset); +} + +static inline void dpu_cf_write(struct dpu_constframe *cf, u32 value, + unsigned int offset) +{ + writel(value, cf->base + offset); +} + +void constframe_shden(struct dpu_constframe *cf, bool enable) +{ + u32 val; + + val = enable ? 
SHDEN : 0; + + mutex_lock(&cf->mutex); + dpu_cf_write(cf, val, STATICCONTROL); + mutex_unlock(&cf->mutex); +} +EXPORT_SYMBOL_GPL(constframe_shden); + +void constframe_framedimensions(struct dpu_constframe *cf, unsigned int w, + unsigned int h) +{ + u32 val; + + val = WIDTH(w) | HEIGHT(h); + + mutex_lock(&cf->mutex); + dpu_cf_write(cf, val, FRAMEDIMENSIONS); + mutex_unlock(&cf->mutex); +} +EXPORT_SYMBOL_GPL(constframe_framedimensions); + +void constframe_framedimensions_copy_prim(struct dpu_constframe *cf) +{ + struct dpu_constframe *prim_cf = NULL; + unsigned int prim_id; + int i; + u32 val; + + if (cf->id != 0 && cf->id != 1) { + dev_warn(cf->dpu->dev, "ConstFrame%d is not a secondary one\n", + cf->id); + return; + } + + prim_id = cf->id + 4; + + for (i = 0; i < ARRAY_SIZE(cf_ids); i++) + if (cf_ids[i] == prim_id) + prim_cf = cf->dpu->cf_priv[i]; + + if (!prim_cf) { + dev_warn(cf->dpu->dev, "cannot find ConstFrame%d's primary peer\n", + cf->id); + return; + } + + mutex_lock(&cf->mutex); + val = dpu_cf_read(prim_cf, FRAMEDIMENSIONS); + dpu_cf_write(cf, val, FRAMEDIMENSIONS); + mutex_unlock(&cf->mutex); +} +EXPORT_SYMBOL_GPL(constframe_framedimensions_copy_prim); + +void constframe_constantcolor(struct dpu_constframe *cf, unsigned int r, + unsigned int g, unsigned int b, unsigned int a) +{ + u32 val; + + val = RED(r) | GREEN(g) | BLUE(b) | ALPHA(a); + + mutex_lock(&cf->mutex); + dpu_cf_write(cf, val, CONSTANTCOLOR); + mutex_unlock(&cf->mutex); +} +EXPORT_SYMBOL_GPL(constframe_constantcolor); + +void constframe_controltrigger(struct dpu_constframe *cf, bool trigger) +{ + u32 val; + + val = trigger ? 
SHDTOKGEN : 0; + + mutex_lock(&cf->mutex); + dpu_cf_write(cf, val, CONTROLTRIGGER); + mutex_unlock(&cf->mutex); +} +EXPORT_SYMBOL_GPL(constframe_controltrigger); + +shadow_load_req_t constframe_to_shdldreq_t(struct dpu_constframe *cf) +{ + shadow_load_req_t t = 0; + + switch (cf->id) { + case 0: + t = SHLDREQID_CONSTFRAME0; + break; + case 1: + t = SHLDREQID_CONSTFRAME1; + break; + case 4: + t = SHLDREQID_CONSTFRAME4; + break; + case 5: + t = SHLDREQID_CONSTFRAME5; + break; + } + + return t; +} +EXPORT_SYMBOL_GPL(constframe_to_shdldreq_t); + +struct dpu_constframe *dpu_cf_get(struct dpu_soc *dpu, int id) +{ + struct dpu_constframe *cf; + int i; + + for (i = 0; i < ARRAY_SIZE(cf_ids); i++) + if (cf_ids[i] == id) + break; + + if (i == ARRAY_SIZE(cf_ids)) + return ERR_PTR(-EINVAL); + + cf = dpu->cf_priv[i]; + + mutex_lock(&cf->mutex); + + if (cf->inuse) { + mutex_unlock(&cf->mutex); + return ERR_PTR(-EBUSY); + } + + cf->inuse = true; + + mutex_unlock(&cf->mutex); + + return cf; +} +EXPORT_SYMBOL_GPL(dpu_cf_get); + +void dpu_cf_put(struct dpu_constframe *cf) +{ + mutex_lock(&cf->mutex); + + cf->inuse = false; + + mutex_unlock(&cf->mutex); +} +EXPORT_SYMBOL_GPL(dpu_cf_put); + +struct dpu_constframe *dpu_aux_cf_peek(struct dpu_constframe *cf) +{ + unsigned int aux_id = cf->id ^ 1; + int i; + + for (i = 0; i < ARRAY_SIZE(cf_ids); i++) + if (cf_ids[i] == aux_id) + return cf->dpu->cf_priv[i]; + + return NULL; +} +EXPORT_SYMBOL_GPL(dpu_aux_cf_peek); + +void _dpu_cf_init(struct dpu_soc *dpu, unsigned int id) +{ + struct dpu_constframe *cf; + int i; + + for (i = 0; i < ARRAY_SIZE(cf_ids); i++) + if (cf_ids[i] == id) + break; + + if (WARN_ON(i == ARRAY_SIZE(cf_ids))) + return; + + cf = dpu->cf_priv[i]; + + constframe_shden(cf, true); + + if (id == 4 || id == 5) { + mutex_lock(&cf->mutex); + dpu_cf_write(cf, safety_stream_cf_color, CONSTANTCOLOR); + mutex_unlock(&cf->mutex); + } +} + +int dpu_cf_init(struct dpu_soc *dpu, unsigned int id, + unsigned long pec_base, unsigned long 
base) +{ + struct dpu_constframe *cf; + int i; + + cf = devm_kzalloc(dpu->dev, sizeof(*cf), GFP_KERNEL); + if (!cf) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(cf_ids); i++) + if (cf_ids[i] == id) + break; + + if (i == ARRAY_SIZE(cf_ids)) + return -EINVAL; + + dpu->cf_priv[i] = cf; + + cf->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_16); + if (!cf->pec_base) + return -ENOMEM; + + cf->base = devm_ioremap(dpu->dev, base, SZ_32); + if (!cf->base) + return -ENOMEM; + + cf->dpu = dpu; + cf->id = id; + cf->shdlreq = cf_shdlreqs[i]; + + mutex_init(&cf->mutex); + + _dpu_cf_init(dpu, id); + + return 0; +} diff --git a/drivers/gpu/imx/dpu/dpu-disengcfg.c b/drivers/gpu/imx/dpu/dpu-disengcfg.c new file mode 100644 index 0000000000000000000000000000000000000000..b4ce1704aa369a224a243e4b83e762168303dc56 --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-disengcfg.c @@ -0,0 +1,148 @@ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <drm/drm_mode.h> +#include <linux/io.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include "dpu-prv.h" + +#define CLOCKCTRL 0x8 +typedef enum { + DSPCLKDIVIDE__DIV1, /* Ext disp clk signal has pix clk freq. */ + DSPCLKDIVIDE__DIV2, /* Ext disp clk signal has 2x the pix clk freq. 
*/
} clkdivide_t;
#define POLARITYCTRL		0xC
#define POLHS_HIGH		BIT(0)
#define POLVS_HIGH		BIT(1)
#define POLEN_HIGH		BIT(2)
#define PIXINV_INV		BIT(3)
#define SRCSELECT		0x10

/* per-instance state of one display engine configuration (DisEngCfg) unit */
struct dpu_disengcfg {
	void __iomem *base;	/* mapped DisEngCfg register block */
	struct mutex mutex;	/* serializes register access and the inuse flag */
	int id;			/* display stream id */
	bool inuse;		/* claimed via dpu_dec_get() */
	struct dpu_soc *dpu;	/* parent DPU instance */
};

/* read a DisEngCfg register */
static inline u32 dpu_dec_read(struct dpu_disengcfg *dec, unsigned int offset)
{
	return readl(dec->base + offset);
}

/* write a DisEngCfg register */
static inline void dpu_dec_write(struct dpu_disengcfg *dec, u32 value,
				 unsigned int offset)
{
	writel(value, dec->base + offset);
}

/*
 * Program HSYNC/VSYNC polarity from DRM mode @flags.
 *
 * When the SoC variant signals active-low sync over the pixel link
 * (devtype->pixel_link_nhvsync), both polarities are forced low and the
 * requested flags are ignored.
 */
void disengcfg_polarity_ctrl(struct dpu_disengcfg *dec, unsigned int flags)
{
	const struct dpu_devtype *devtype = dec->dpu->devtype;
	u32 val;

	mutex_lock(&dec->mutex);
	val = dpu_dec_read(dec, POLARITYCTRL);
	if (devtype->pixel_link_nhvsync) {
		val &= ~POLHS_HIGH;
		val &= ~POLVS_HIGH;
	} else {
		if (flags & DRM_MODE_FLAG_PHSYNC)
			val |= POLHS_HIGH;
		if (flags & DRM_MODE_FLAG_NHSYNC)
			val &= ~POLHS_HIGH;
		if (flags & DRM_MODE_FLAG_PVSYNC)
			val |= POLVS_HIGH;
		if (flags & DRM_MODE_FLAG_NVSYNC)
			val &= ~POLVS_HIGH;
	}
	dpu_dec_write(dec, val, POLARITYCTRL);
	mutex_unlock(&dec->mutex);
}
EXPORT_SYMBOL_GPL(disengcfg_polarity_ctrl);

/*
 * Claim the DisEngCfg unit with the given @id for exclusive use.
 *
 * Returns the unit, ERR_PTR(-EINVAL) for an unknown id, or
 * ERR_PTR(-EBUSY) if it is already claimed.
 */
struct dpu_disengcfg *dpu_dec_get(struct dpu_soc *dpu, int id)
{
	struct dpu_disengcfg *dec;
	int i;

	for (i = 0; i < ARRAY_SIZE(dec_ids); i++)
		if (dec_ids[i] == id)
			break;

	if (i == ARRAY_SIZE(dec_ids))
		return ERR_PTR(-EINVAL);

	dec = dpu->dec_priv[i];

	mutex_lock(&dec->mutex);

	if (dec->inuse) {
		mutex_unlock(&dec->mutex);
		return ERR_PTR(-EBUSY);
	}

	dec->inuse = true;

	mutex_unlock(&dec->mutex);

	return dec;
}
EXPORT_SYMBOL_GPL(dpu_dec_get);

/* release a DisEngCfg unit previously claimed with dpu_dec_get() */
void dpu_dec_put(struct dpu_disengcfg *dec)
{
	mutex_lock(&dec->mutex);

	dec->inuse = false;

	mutex_unlock(&dec->mutex);
}
EXPORT_SYMBOL_GPL(dpu_dec_put);

/*
 * Peek at the companion DisEngCfg of the other display stream (id 0 <-> 1)
 * without claiming it.
 * NOTE(review): indexes dec_priv[] by raw id, unlike dpu_dec_get() which maps
 * through dec_ids[] — relies on dec_ids being the identity map; confirm.
 */
struct dpu_disengcfg *dpu_aux_dec_peek(struct dpu_disengcfg *dec)
{
	return dec->dpu->dec_priv[dec->id ^ 1];
+} +EXPORT_SYMBOL_GPL(dpu_aux_dec_peek); + +void _dpu_dec_init(struct dpu_soc *dpu, unsigned int id) +{ +} + +int dpu_dec_init(struct dpu_soc *dpu, unsigned int id, + unsigned long unused, unsigned long base) +{ + struct dpu_disengcfg *dec; + + dec = devm_kzalloc(dpu->dev, sizeof(*dec), GFP_KERNEL); + if (!dec) + return -ENOMEM; + + dpu->dec_priv[id] = dec; + + dec->base = devm_ioremap(dpu->dev, base, SZ_16); + if (!dec->base) + return -ENOMEM; + + dec->dpu = dpu; + dec->id = id; + mutex_init(&dec->mutex); + + return 0; +} diff --git a/drivers/gpu/imx/dpu/dpu-extdst.c b/drivers/gpu/imx/dpu/dpu-extdst.c new file mode 100644 index 0000000000000000000000000000000000000000..fe6b7e6343fea0756c9d09b59bddd2e2aaecdb66 --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-extdst.c @@ -0,0 +1,546 @@ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <video/dpu.h> +#include "dpu-prv.h" + +#define PIXENGCFG_STATIC 0x8 +#define POWERDOWN BIT(4) +#define SYNC_MODE BIT(8) +#define SW_RESET BIT(11) +#define DIV(n) (((n) & 0xFF) << 16) +#define DIV_RESET 0x80 +#define PIXENGCFG_DYNAMIC 0xC +#define PIXENGCFG_REQUEST 0x10 +#define SHDLDREQ(n) BIT(n) +#define SEL_SHDLDREQ BIT(0) +#define PIXENGCFG_TRIGGER 0x14 +#define SYNC_TRIGGER BIT(0) +#define TRIGGER_SEQUENCE_COMPLETE BIT(4) +#define PIXENGCFG_STATUS 0x18 +#define SYNC_BUSY BIT(8) +#define KICK_MODE BIT(8) +#define PERFCOUNTMODE BIT(12) +#define CONTROL 0xC +#define GAMMAAPPLYENABLE BIT(0) +#define SOFTWAREKICK 0x10 +#define KICK BIT(0) +#define STATUS 0x14 +#define CNT_ERR_STS BIT(0) +#define CONTROLWORD 0x18 +#define CURPIXELCNT 0x1C +static u16 get_xval(u32 pixel_cnt) +{ + return pixel_cnt & 0xFFFF; +} + +static u16 get_yval(u32 pixel_cnt) +{ + return pixel_cnt >> 16; +} +#define LASTPIXELCNT 0x20 +#define PERFCOUNTER 0x24 + +struct dpu_extdst { + void __iomem *pec_base; + void __iomem *base; + struct mutex mutex; + int id; + bool inuse; + struct dpu_soc *dpu; +}; + +static inline u32 dpu_pec_ed_read(struct dpu_extdst *ed, unsigned int offset) +{ + return readl(ed->pec_base + offset); +} + +static inline void dpu_pec_ed_write(struct dpu_extdst *ed, u32 value, + unsigned int offset) +{ + writel(value, ed->pec_base + offset); +} + +static inline u32 dpu_ed_read(struct dpu_extdst *ed, unsigned int offset) +{ + return readl(ed->base + offset); +} + +static inline void dpu_ed_write(struct dpu_extdst *ed, u32 value, + unsigned int offset) +{ + writel(value, ed->base + offset); +} + +static inline bool dpu_ed_is_safety_stream(struct dpu_extdst *ed) +{ + if (ed->id == 4 || ed->id == 5) + return true; + + return false; +} + +static inline bool dpu_ed_src_sel_is_extsrc(extdst_src_sel_t src) +{ + if (src == 
ED_SRC_EXTSRC4 || src == ED_SRC_EXTSRC5) + return true; + + return false; +} + +void extdst_pixengcfg_shden(struct dpu_extdst *ed, bool enable) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_pec_ed_read(ed, PIXENGCFG_STATIC); + if (enable) + val |= SHDEN; + else + val &= ~SHDEN; + dpu_pec_ed_write(ed, val, PIXENGCFG_STATIC); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_shden); + +void extdst_pixengcfg_powerdown(struct dpu_extdst *ed, bool powerdown) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_pec_ed_read(ed, PIXENGCFG_STATIC); + if (powerdown) + val |= POWERDOWN; + else + val &= ~POWERDOWN; + dpu_pec_ed_write(ed, val, PIXENGCFG_STATIC); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_powerdown); + +void extdst_pixengcfg_sync_mode(struct dpu_extdst *ed, ed_sync_mode_t mode) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_pec_ed_read(ed, PIXENGCFG_STATIC); + if (mode == AUTO) + val |= SYNC_MODE; + else + val &= ~SYNC_MODE; + dpu_pec_ed_write(ed, val, PIXENGCFG_STATIC); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_sync_mode); + +void extdst_pixengcfg_reset(struct dpu_extdst *ed, bool reset) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_pec_ed_read(ed, PIXENGCFG_STATIC); + if (reset) + val |= SW_RESET; + else + val &= ~SW_RESET; + dpu_pec_ed_write(ed, val, PIXENGCFG_STATIC); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_reset); + +void extdst_pixengcfg_div(struct dpu_extdst *ed, u16 div) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_pec_ed_read(ed, PIXENGCFG_STATIC); + val &= ~0xFF0000; + val |= DIV(div); + dpu_pec_ed_write(ed, val, PIXENGCFG_STATIC); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_div); + +void extdst_pixengcfg_syncmode_master(struct dpu_extdst *ed, bool enable) +{ + struct dpu_soc *dpu = ed->dpu; + u32 val; + + if (!dpu->devtype->has_syncmode_fixup) + return; + + mutex_lock(&ed->mutex); + val = 
dpu_pec_ed_read(ed, PIXENGCFG_STATIC); + if (enable) + val |= BIT(16); + else + val &= ~BIT(16); + dpu_pec_ed_write(ed, val, PIXENGCFG_STATIC); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_syncmode_master); + +int extdst_pixengcfg_src_sel(struct dpu_extdst *ed, extdst_src_sel_t src) +{ + struct dpu_soc *dpu = ed->dpu; + const unsigned int *block_id_map = dpu->devtype->sw2hw_block_id_map; + u32 mapped_src; + + mapped_src = block_id_map ? block_id_map[src] : src; + if (WARN_ON(mapped_src == NA)) + return -EINVAL; + + if (dpu_ed_is_safety_stream(ed) && dpu_ed_src_sel_is_extsrc(src)) { + dev_err(dpu->dev, "ExtDst%d source cannot be ExtSrc\n", ed->id); + return -EINVAL; + } + + mutex_lock(&ed->mutex); + dpu_pec_ed_write(ed, mapped_src, PIXENGCFG_DYNAMIC); + mutex_unlock(&ed->mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_src_sel); + +void extdst_pixengcfg_sel_shdldreq(struct dpu_extdst *ed) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_pec_ed_read(ed, PIXENGCFG_REQUEST); + val |= SEL_SHDLDREQ; + dpu_pec_ed_write(ed, val, PIXENGCFG_REQUEST); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_sel_shdldreq); + +void extdst_pixengcfg_shdldreq(struct dpu_extdst *ed, u32 req_mask) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_pec_ed_read(ed, PIXENGCFG_REQUEST); + val |= req_mask; + dpu_pec_ed_write(ed, val, PIXENGCFG_REQUEST); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_shdldreq); + +void extdst_pixengcfg_sync_trigger(struct dpu_extdst *ed) +{ + mutex_lock(&ed->mutex); + dpu_pec_ed_write(ed, SYNC_TRIGGER, PIXENGCFG_TRIGGER); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_sync_trigger); + +void extdst_pixengcfg_trigger_sequence_complete(struct dpu_extdst *ed) +{ + mutex_lock(&ed->mutex); + dpu_pec_ed_write(ed, TRIGGER_SEQUENCE_COMPLETE, PIXENGCFG_TRIGGER); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_trigger_sequence_complete); + +bool 
extdst_pixengcfg_is_sync_busy(struct dpu_extdst *ed) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_pec_ed_read(ed, PIXENGCFG_STATUS); + mutex_unlock(&ed->mutex); + + return val & SYNC_BUSY; +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_is_sync_busy); + +ed_pipeline_status_t extdst_pixengcfg_pipeline_status(struct dpu_extdst *ed) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_pec_ed_read(ed, PIXENGCFG_STATUS); + mutex_unlock(&ed->mutex); + + return val & 0x3; +} +EXPORT_SYMBOL_GPL(extdst_pixengcfg_pipeline_status); + +void extdst_shden(struct dpu_extdst *ed, bool enable) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_ed_read(ed, STATICCONTROL); + if (enable) + val |= SHDEN; + else + val &= ~SHDEN; + dpu_ed_write(ed, val, STATICCONTROL); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_shden); + +void extdst_kick_mode(struct dpu_extdst *ed, ed_kick_mode_t mode) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_ed_read(ed, STATICCONTROL); + val &= ~KICK_MODE; + val |= mode; + dpu_ed_write(ed, val, STATICCONTROL); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_kick_mode); + +void extdst_perfcountmode(struct dpu_extdst *ed, bool enable) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_ed_read(ed, STATICCONTROL); + if (enable) + val |= PERFCOUNTMODE; + else + val &= ~PERFCOUNTMODE; + dpu_ed_write(ed, val, STATICCONTROL); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_perfcountmode); + +void extdst_gamma_apply_enable(struct dpu_extdst *ed, bool enable) +{ + u32 val; + + mutex_lock(&ed->mutex); + val = dpu_ed_read(ed, CONTROL); + if (enable) + val |= GAMMAAPPLYENABLE; + else + val &= ~GAMMAAPPLYENABLE; + dpu_ed_write(ed, val, CONTROL); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_gamma_apply_enable); + +void extdst_kick(struct dpu_extdst *ed) +{ + mutex_lock(&ed->mutex); + dpu_ed_write(ed, KICK, SOFTWAREKICK); + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(extdst_kick); + +void extdst_cnt_err_clear(struct 
dpu_extdst *ed)
{
	mutex_lock(&ed->mutex);
	dpu_ed_write(ed, CNT_ERR_STS, STATUS);
	mutex_unlock(&ed->mutex);
}
EXPORT_SYMBOL_GPL(extdst_cnt_err_clear);

/* return true if the ExtDst unit has latched a count error (CNT_ERR_STS) */
bool extdst_cnt_err_status(struct dpu_extdst *ed)
{
	u32 val;

	mutex_lock(&ed->mutex);
	val = dpu_ed_read(ed, STATUS);
	mutex_unlock(&ed->mutex);

	return val & CNT_ERR_STS;
}
EXPORT_SYMBOL_GPL(extdst_cnt_err_status);

/* read back the last processed control word (CONTROLWORD register) */
u32 extdst_last_control_word(struct dpu_extdst *ed)
{
	u32 val;

	mutex_lock(&ed->mutex);
	val = dpu_ed_read(ed, CONTROLWORD);
	mutex_unlock(&ed->mutex);

	return val;
}
EXPORT_SYMBOL_GPL(extdst_last_control_word);

/* current pixel position: CURPIXELCNT split into x (low) / y (high) halves */
void extdst_pixel_cnt(struct dpu_extdst *ed, u16 *x, u16 *y)
{
	u32 val;

	mutex_lock(&ed->mutex);
	val = dpu_ed_read(ed, CURPIXELCNT);
	mutex_unlock(&ed->mutex);

	*x = get_xval(val);
	*y = get_yval(val);
}
EXPORT_SYMBOL_GPL(extdst_pixel_cnt);

/* last-frame pixel position: LASTPIXELCNT split into x/y halves */
void extdst_last_pixel_cnt(struct dpu_extdst *ed, u16 *x, u16 *y)
{
	u32 val;

	mutex_lock(&ed->mutex);
	val = dpu_ed_read(ed, LASTPIXELCNT);
	mutex_unlock(&ed->mutex);

	*x = get_xval(val);
	*y = get_yval(val);
}
EXPORT_SYMBOL_GPL(extdst_last_pixel_cnt);

/* read the PERFCOUNTER result register */
u32 extdst_perfresult(struct dpu_extdst *ed)
{
	u32 val;

	mutex_lock(&ed->mutex);
	val = dpu_ed_read(ed, PERFCOUNTER);
	mutex_unlock(&ed->mutex);

	return val;
}
EXPORT_SYMBOL_GPL(extdst_perfresult);

/* true if this ExtDst drives the SoC's master display stream */
bool extdst_is_master(struct dpu_extdst *ed)
{
	const struct dpu_devtype *devtype = ed->dpu->devtype;

	return ed->id == devtype->master_stream_id;
}
EXPORT_SYMBOL_GPL(extdst_is_master);

/*
 * Claim the ExtDst unit with the given @id for exclusive use.
 *
 * Returns the unit, ERR_PTR(-EINVAL) for an unknown id, or
 * ERR_PTR(-EBUSY) if it is already claimed.
 */
struct dpu_extdst *dpu_ed_get(struct dpu_soc *dpu, int id)
{
	struct dpu_extdst *ed;
	int i;

	for (i = 0; i < ARRAY_SIZE(ed_ids); i++)
		if (ed_ids[i] == id)
			break;

	if (i == ARRAY_SIZE(ed_ids))
		return ERR_PTR(-EINVAL);

	ed = dpu->ed_priv[i];

	mutex_lock(&ed->mutex);

	if (ed->inuse) {
		mutex_unlock(&ed->mutex);
		return ERR_PTR(-EBUSY);
	}

	ed->inuse = true;

	mutex_unlock(&ed->mutex);

	return ed;
}
+EXPORT_SYMBOL_GPL(dpu_ed_get); + +void dpu_ed_put(struct dpu_extdst *ed) +{ + mutex_lock(&ed->mutex); + + ed->inuse = false; + + mutex_unlock(&ed->mutex); +} +EXPORT_SYMBOL_GPL(dpu_ed_put); + +struct dpu_extdst *dpu_aux_ed_peek(struct dpu_extdst *ed) +{ + unsigned int aux_id = ed->id ^ 1; + int i; + + for (i = 0; i < ARRAY_SIZE(ed_ids); i++) + if (ed_ids[i] == aux_id) + return ed->dpu->ed_priv[i]; + + return NULL; +} +EXPORT_SYMBOL_GPL(dpu_aux_ed_peek); + +void _dpu_ed_init(struct dpu_soc *dpu, unsigned int id) +{ + struct dpu_extdst *ed; + int i; + + for (i = 0; i < ARRAY_SIZE(ed_ids); i++) + if (ed_ids[i] == id) + break; + + if (WARN_ON(i == ARRAY_SIZE(ed_ids))) + return; + + ed = dpu->ed_priv[i]; + + extdst_pixengcfg_src_sel(ed, ED_SRC_DISABLE); + extdst_pixengcfg_shden(ed, true); + extdst_pixengcfg_powerdown(ed, false); + extdst_pixengcfg_sync_mode(ed, SINGLE); + extdst_pixengcfg_reset(ed, false); + extdst_pixengcfg_div(ed, DIV_RESET); + extdst_shden(ed, true); + extdst_perfcountmode(ed, false); + extdst_kick_mode(ed, EXTERNAL); +} + +int dpu_ed_init(struct dpu_soc *dpu, unsigned int id, + unsigned long pec_base, unsigned long base) +{ + struct dpu_extdst *ed; + int ret, i; + + ed = devm_kzalloc(dpu->dev, sizeof(*ed), GFP_KERNEL); + if (!ed) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(ed_ids); i++) + if (ed_ids[i] == id) + break; + + if (i == ARRAY_SIZE(ed_ids)) + return -EINVAL; + + dpu->ed_priv[i] = ed; + + ed->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_32); + if (!ed->pec_base) + return -ENOMEM; + + ed->base = devm_ioremap(dpu->dev, base, SZ_64); + if (!ed->base) + return -ENOMEM; + + ed->dpu = dpu; + ed->id = id; + mutex_init(&ed->mutex); + + ret = extdst_pixengcfg_src_sel(ed, ED_SRC_DISABLE); + if (ret < 0) + return ret; + + _dpu_ed_init(dpu, id); + + return 0; +} diff --git a/drivers/gpu/imx/dpu/dpu-fetchdecode.c b/drivers/gpu/imx/dpu/dpu-fetchdecode.c new file mode 100644 index 
0000000000000000000000000000000000000000..cf385a7110386b70c2e3f184d140fa3dd7ea151c --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-fetchdecode.c @@ -0,0 +1,778 @@ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <video/dpu.h> +#include "dpu-prv.h" + +#define FD_NUM_V1 4 +#define FD_NUM_V2 2 + +static const u32 fd_vproc_cap_v1[FD_NUM_V1] = { + DPU_VPROC_CAP_HSCALER4 | DPU_VPROC_CAP_VSCALER4 | + DPU_VPROC_CAP_FETCHECO0, + DPU_VPROC_CAP_HSCALER5 | DPU_VPROC_CAP_VSCALER5 | + DPU_VPROC_CAP_FETCHECO1, + DPU_VPROC_CAP_HSCALER4 | DPU_VPROC_CAP_VSCALER4 | + DPU_VPROC_CAP_FETCHECO0, + DPU_VPROC_CAP_HSCALER5 | DPU_VPROC_CAP_VSCALER5 | + DPU_VPROC_CAP_FETCHECO1, +}; + +static const u32 fd_vproc_cap_v2[FD_NUM_V2] = { + DPU_VPROC_CAP_HSCALER4 | DPU_VPROC_CAP_VSCALER4 | + DPU_VPROC_CAP_FETCHECO0, + DPU_VPROC_CAP_HSCALER5 | DPU_VPROC_CAP_VSCALER5 | + DPU_VPROC_CAP_FETCHECO1, +}; + +#define PIXENGCFG_DYNAMIC 0x8 +#define SRC_NUM_V1 3 +#define SRC_NUM_V2 4 +static const fd_dynamic_src_sel_t fd_srcs_v1[FD_NUM_V1][SRC_NUM_V1] = { + { FD_SRC_DISABLE, FD_SRC_FETCHECO0, FD_SRC_FETCHDECODE2 }, + { FD_SRC_DISABLE, FD_SRC_FETCHECO1, FD_SRC_FETCHDECODE3 }, + { FD_SRC_DISABLE, FD_SRC_FETCHECO0, FD_SRC_FETCHECO2 }, + { FD_SRC_DISABLE, FD_SRC_FETCHECO1, FD_SRC_FETCHECO2 }, +}; + +static const 
fd_dynamic_src_sel_t fd_srcs_v2[FD_NUM_V2][SRC_NUM_V2] = { + { + FD_SRC_DISABLE, FD_SRC_FETCHECO0, + FD_SRC_FETCHDECODE1, FD_SRC_FETCHWARP2 + }, { + FD_SRC_DISABLE, FD_SRC_FETCHECO1, + FD_SRC_FETCHDECODE0, FD_SRC_FETCHWARP2 + }, +}; + +#define PIXENGCFG_STATUS 0xC + +#define RINGBUFSTARTADDR0 0x10 +#define RINGBUFWRAPADDR0 0x14 +#define FRAMEPROPERTIES0 0x18 +#define BASEADDRESS0 0x1C +#define SOURCEBUFFERATTRIBUTES0 0x20 +#define SOURCEBUFFERDIMENSION0 0x24 +#define COLORCOMPONENTBITS0 0x28 +#define COLORCOMPONENTSHIFT0 0x2C +#define LAYEROFFSET0 0x30 +#define CLIPWINDOWOFFSET0 0x34 +#define CLIPWINDOWDIMENSIONS0 0x38 +#define CONSTANTCOLOR0 0x3C +#define LAYERPROPERTY0 0x40 +#define FRAMEDIMENSIONS 0x44 +#define FRAMERESAMPLING 0x48 +#define DECODECONTROL 0x4C +#define SOURCEBUFFERLENGTH 0x50 +#define CONTROL 0x54 +#define CONTROLTRIGGER 0x58 +#define START 0x5C +#define FETCHTYPE 0x60 +#define DECODERSTATUS 0x64 +#define READADDRESS0 0x68 +#define BURSTBUFFERPROPERTIES 0x6C +#define STATUS 0x70 +#define HIDDENSTATUS 0x74 + +static const shadow_load_req_t fd_shdlreqs[] = { + SHLDREQID_FETCHDECODE0, SHLDREQID_FETCHDECODE1, + SHLDREQID_FETCHDECODE2, SHLDREQID_FETCHDECODE3, +}; + +struct dpu_fetchdecode { + struct dpu_fetchunit fu; + fetchtype_t fetchtype; + shadow_load_req_t shdlreq; +}; + +int fetchdecode_pixengcfg_dynamic_src_sel(struct dpu_fetchunit *fu, + fd_dynamic_src_sel_t src) +{ + struct dpu_soc *dpu = fu->dpu; + const struct dpu_devtype *devtype = dpu->devtype; + int i; + + mutex_lock(&fu->mutex); + if (devtype->version == DPU_V1) { + for (i = 0; i < SRC_NUM_V1; i++) { + if (fd_srcs_v1[fu->id][i] == src) { + dpu_pec_fu_write(fu, src, PIXENGCFG_DYNAMIC); + mutex_unlock(&fu->mutex); + return 0; + } + } + } else if (devtype->version == DPU_V2) { + const unsigned int *block_id_map = devtype->sw2hw_block_id_map; + u32 mapped_src; + + if (WARN_ON(!block_id_map)) + return -EINVAL; + + for (i = 0; i < SRC_NUM_V2; i++) { + if (fd_srcs_v2[fu->id][i] == src) { + 
mapped_src = block_id_map[src]; + if (WARN_ON(mapped_src == NA)) + return -EINVAL; + + dpu_pec_fu_write(fu, mapped_src, + PIXENGCFG_DYNAMIC); + mutex_unlock(&fu->mutex); + return 0; + } + } + } else { + WARN_ON(1); + } + mutex_unlock(&fu->mutex); + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(fetchdecode_pixengcfg_dynamic_src_sel); + +static void +fetchdecode_set_baseaddress(struct dpu_fetchunit *fu, unsigned int width, + unsigned int x_offset, unsigned int y_offset, + unsigned int mt_w, unsigned int mt_h, + int bpp, dma_addr_t baddr) +{ + unsigned int burst_size, stride; + bool nonzero_mod = !!mt_w; + + if (nonzero_mod) { + /* consider PRG x offset to calculate buffer address */ + baddr += (x_offset % mt_w) * (bpp / 8); + + /* + * address TKT343664: + * fetch unit base address has to align to burst size + */ + burst_size = 1 << (ffs(baddr) - 1); + burst_size = round_up(burst_size, 8); + burst_size = min(burst_size, 128U); + + stride = width * (bpp >> 3); + /* + * address TKT339017: + * fixup for burst size vs stride mismatch + */ + stride = round_up(stride + round_up(baddr % 8, 8), burst_size); + + /* consider PRG y offset to calculate buffer address */ + baddr += (y_offset % mt_h) * stride; + } + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, baddr, BASEADDRESS0); + mutex_unlock(&fu->mutex); +} + +static void fetchdecode_set_src_bpp(struct dpu_fetchunit *fu, int bpp) +{ + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, SOURCEBUFFERATTRIBUTES0); + val &= ~0x3f0000; + val |= BITSPERPIXEL(bpp); + dpu_fu_write(fu, val, SOURCEBUFFERATTRIBUTES0); + mutex_unlock(&fu->mutex); +} + +static void +fetchdecode_set_src_stride(struct dpu_fetchunit *fu, + unsigned int width, unsigned int x_offset, + unsigned int mt_w, int bpp, unsigned int stride, + dma_addr_t baddr, bool use_prefetch) +{ + unsigned int burst_size; + bool nonzero_mod = !!mt_w; + u32 val; + + if (use_prefetch) { + /* consider PRG x offset to calculate buffer address */ + if (nonzero_mod) + baddr += 
(x_offset % mt_w) * (bpp / 8); + + /* + * address TKT343664: + * fetch unit base address has to align to burst size + */ + burst_size = 1 << (ffs(baddr) - 1); + burst_size = round_up(burst_size, 8); + burst_size = min(burst_size, 128U); + + stride = width * (bpp >> 3); + /* + * address TKT339017: + * fixup for burst size vs stride mismatch + */ + if (nonzero_mod) + stride = round_up(stride + round_up(baddr % 8, 8), + burst_size); + else + stride = round_up(stride, burst_size); + } + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, SOURCEBUFFERATTRIBUTES0); + val &= ~0xffff; + val |= STRIDE(stride); + dpu_fu_write(fu, val, SOURCEBUFFERATTRIBUTES0); + mutex_unlock(&fu->mutex); +} + +static void +fetchdecode_set_src_buf_dimensions(struct dpu_fetchunit *fu, + unsigned int w, unsigned int h, + u32 unused, bool deinterlace) +{ + u32 val; + + if (deinterlace) + h /= 2; + + val = LINEWIDTH(w) | LINECOUNT(h); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, val, SOURCEBUFFERDIMENSION0); + mutex_unlock(&fu->mutex); +} + +static void +fetchdecode_set_fmt(struct dpu_fetchunit *fu, u32 fmt, bool deinterlace) +{ + u32 val, bits, shift; + bool is_planar_yuv = false, is_rastermode_yuv422 = false; + bool is_yuv422upsamplingmode_interpolate = false; + bool is_inputselect_compact = false; + bool need_csc = false; + int i; + + switch (fmt) { + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + is_rastermode_yuv422 = true; + is_yuv422upsamplingmode_interpolate = true; + need_csc = true; + break; + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + is_yuv422upsamplingmode_interpolate = true; + /* fall-through */ + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + if (deinterlace) + is_yuv422upsamplingmode_interpolate = true; + is_planar_yuv = true; + is_rastermode_yuv422 = true; + is_inputselect_compact = true; + need_csc = true; + break; + case DRM_FORMAT_NV24: + case DRM_FORMAT_NV42: + is_planar_yuv = true; + is_yuv422upsamplingmode_interpolate = true; + is_inputselect_compact = true; + 
need_csc = true; + break; + default: + break; + } + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, CONTROL); + val &= ~YUV422UPSAMPLINGMODE_MASK; + val &= ~INPUTSELECT_MASK; + val &= ~RASTERMODE_MASK; + if (is_yuv422upsamplingmode_interpolate) + val |= YUV422UPSAMPLINGMODE(YUV422UPSAMPLINGMODE__INTERPOLATE); + else + val |= YUV422UPSAMPLINGMODE(YUV422UPSAMPLINGMODE__REPLICATE); + if (is_inputselect_compact) + val |= INPUTSELECT(INPUTSELECT__COMPPACK); + else + val |= INPUTSELECT(INPUTSELECT__INACTIVE); + if (is_rastermode_yuv422) + val |= RASTERMODE(RASTERMODE__YUV422); + else + val |= RASTERMODE(RASTERMODE__NORMAL); + dpu_fu_write(fu, val, CONTROL); + + val = dpu_fu_read(fu, LAYERPROPERTY0); + val &= ~YUVCONVERSIONMODE_MASK; + if (need_csc) + /* + * assuming fetchdecode always ouputs RGB pixel formats + * + * FIXME: + * determine correct standard here - ITU601 or ITU601_FR + * or ITU709 + */ + val |= YUVCONVERSIONMODE(YUVCONVERSIONMODE__ITU601_FR); + else + val |= YUVCONVERSIONMODE(YUVCONVERSIONMODE__OFF); + dpu_fu_write(fu, val, LAYERPROPERTY0); + mutex_unlock(&fu->mutex); + + for (i = 0; i < ARRAY_SIZE(dpu_pixel_format_matrix); i++) { + if (dpu_pixel_format_matrix[i].pixel_format == fmt) { + bits = dpu_pixel_format_matrix[i].bits; + shift = dpu_pixel_format_matrix[i].shift; + + if (is_planar_yuv) { + bits &= ~(U_BITS_MASK | V_BITS_MASK); + shift &= ~(U_SHIFT_MASK | V_SHIFT_MASK); + } + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, bits, COLORCOMPONENTBITS0); + dpu_fu_write(fu, shift, COLORCOMPONENTSHIFT0); + mutex_unlock(&fu->mutex); + return; + } + } + + WARN_ON(1); +} + +void fetchdecode_layeroffset(struct dpu_fetchunit *fu, unsigned int x, + unsigned int y) +{ + u32 val; + + val = LAYERXOFFSET(x) | LAYERYOFFSET(y); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, val, LAYEROFFSET0); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchdecode_layeroffset); + +void fetchdecode_clipoffset(struct dpu_fetchunit *fu, unsigned int x, + unsigned int y) +{ + u32 
val; + + val = CLIPWINDOWXOFFSET(x) | CLIPWINDOWYOFFSET(y); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, val, CLIPWINDOWOFFSET0); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchdecode_clipoffset); + +static void fetchdecode_enable_src_buf(struct dpu_fetchunit *fu) +{ + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, LAYERPROPERTY0); + val |= SOURCEBUFFERENABLE; + dpu_fu_write(fu, val, LAYERPROPERTY0); + mutex_unlock(&fu->mutex); +} + +static void fetchdecode_disable_src_buf(struct dpu_fetchunit *fu) +{ + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, LAYERPROPERTY0); + val &= ~SOURCEBUFFERENABLE; + dpu_fu_write(fu, val, LAYERPROPERTY0); + mutex_unlock(&fu->mutex); +} + +static bool fetchdecode_is_enabled(struct dpu_fetchunit *fu) +{ + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, LAYERPROPERTY0); + mutex_unlock(&fu->mutex); + + return !!(val & SOURCEBUFFERENABLE); +} + +void fetchdecode_clipdimensions(struct dpu_fetchunit *fu, unsigned int w, + unsigned int h) +{ + u32 val; + + val = CLIPWINDOWWIDTH(w) | CLIPWINDOWHEIGHT(h); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, val, CLIPWINDOWDIMENSIONS0); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchdecode_clipdimensions); + +static void +fetchdecode_set_framedimensions(struct dpu_fetchunit *fu, + unsigned int w, unsigned int h, + bool deinterlace) +{ + u32 val; + + if (deinterlace) + h /= 2; + + val = FRAMEWIDTH(w) | FRAMEHEIGHT(h); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, val, FRAMEDIMENSIONS); + mutex_unlock(&fu->mutex); +} + +void fetchdecode_rgb_constantcolor(struct dpu_fetchunit *fu, + u8 r, u8 g, u8 b, u8 a) +{ + u32 val; + + val = rgb_color(r, g, b, a); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, val, CONSTANTCOLOR0); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchdecode_rgb_constantcolor); + +void fetchdecode_yuv_constantcolor(struct dpu_fetchunit *fu, u8 y, u8 u, u8 v) +{ + u32 val; + + val = yuv_color(y, u, v); + + 
mutex_lock(&fu->mutex); + dpu_fu_write(fu, val, CONSTANTCOLOR0); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchdecode_yuv_constantcolor); + +static void fetchdecode_set_controltrigger(struct dpu_fetchunit *fu) +{ + mutex_lock(&fu->mutex); + dpu_fu_write(fu, SHDTOKGEN, CONTROLTRIGGER); + mutex_unlock(&fu->mutex); +} + +int fetchdecode_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type) +{ + struct dpu_soc *dpu = fu->dpu; + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, FETCHTYPE); + val &= FETCHTYPE_MASK; + mutex_unlock(&fu->mutex); + + switch (val) { + case FETCHTYPE__DECODE: + dev_dbg(dpu->dev, "FetchDecode%d with RL and RLAD decoder\n", + fu->id); + break; + case FETCHTYPE__LAYER: + dev_dbg(dpu->dev, "FetchDecode%d with fractional " + "plane(8 layers)\n", fu->id); + break; + case FETCHTYPE__WARP: + dev_dbg(dpu->dev, "FetchDecode%d with arbitrary warping and " + "fractional plane(8 layers)\n", fu->id); + break; + case FETCHTYPE__ECO: + dev_dbg(dpu->dev, "FetchDecode%d with minimum feature set for " + "alpha, chroma and coordinate planes\n", + fu->id); + break; + case FETCHTYPE__PERSP: + dev_dbg(dpu->dev, "FetchDecode%d with affine, perspective and " + "arbitrary warping\n", fu->id); + break; + case FETCHTYPE__ROT: + dev_dbg(dpu->dev, "FetchDecode%d with affine and arbitrary " + "warping\n", fu->id); + break; + case FETCHTYPE__DECODEL: + dev_dbg(dpu->dev, "FetchDecode%d with RL and RLAD decoder, " + "reduced feature set\n", fu->id); + break; + case FETCHTYPE__LAYERL: + dev_dbg(dpu->dev, "FetchDecode%d with fractional " + "plane(8 layers), reduced feature set\n", + fu->id); + break; + case FETCHTYPE__ROTL: + dev_dbg(dpu->dev, "FetchDecode%d with affine and arbitrary " + "warping, reduced feature set\n", fu->id); + break; + default: + dev_warn(dpu->dev, "Invalid fetch type %u for FetchDecode%d\n", + val, fu->id); + return -EINVAL; + } + + *type = val; + return 0; +} +EXPORT_SYMBOL_GPL(fetchdecode_fetchtype); + +shadow_load_req_t 
fetchdecode_to_shdldreq_t(struct dpu_fetchunit *fu) +{ + shadow_load_req_t t = 0; + + switch (fu->id) { + case 0: + t = SHLDREQID_FETCHDECODE0; + break; + case 1: + t = SHLDREQID_FETCHDECODE1; + break; + case 2: + t = SHLDREQID_FETCHDECODE2; + break; + case 3: + t = SHLDREQID_FETCHDECODE3; + break; + default: + break; + } + + return t; +} +EXPORT_SYMBOL_GPL(fetchdecode_to_shdldreq_t); + +u32 fetchdecode_get_vproc_mask(struct dpu_fetchunit *fu) +{ + struct dpu_soc *dpu = fu->dpu; + const struct dpu_devtype *devtype = dpu->devtype; + + return devtype->version == DPU_V1 ? + fd_vproc_cap_v1[fu->id] : fd_vproc_cap_v2[fu->id]; +} +EXPORT_SYMBOL_GPL(fetchdecode_get_vproc_mask); + +struct dpu_fetchunit *fetchdecode_get_fetcheco(struct dpu_fetchunit *fu) +{ + struct dpu_soc *dpu = fu->dpu; + + switch (fu->id) { + case 0: + case 1: + return dpu->fe_priv[fu->id]; + case 2: + case 3: + /* TODO: for DPU v1, add FetchEco2 support */ + return dpu->fe_priv[fu->id - 2]; + default: + WARN_ON(1); + } + + return ERR_PTR(-EINVAL); +} +EXPORT_SYMBOL_GPL(fetchdecode_get_fetcheco); + +bool fetchdecode_need_fetcheco(struct dpu_fetchunit *fu, u32 fmt) +{ + struct dpu_fetchunit *fe = fetchdecode_get_fetcheco(fu); + + if (IS_ERR_OR_NULL(fe)) + return false; + + switch (fmt) { + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + case DRM_FORMAT_NV24: + case DRM_FORMAT_NV42: + return true; + } + + return false; +} +EXPORT_SYMBOL_GPL(fetchdecode_need_fetcheco); + +struct dpu_hscaler *fetchdecode_get_hscaler(struct dpu_fetchunit *fu) +{ + struct dpu_soc *dpu = fu->dpu; + + switch (fu->id) { + case 0: + case 2: + return dpu->hs_priv[0]; + case 1: + case 3: + return dpu->hs_priv[1]; + default: + WARN_ON(1); + } + + return ERR_PTR(-EINVAL); +} +EXPORT_SYMBOL_GPL(fetchdecode_get_hscaler); + +struct dpu_vscaler *fetchdecode_get_vscaler(struct dpu_fetchunit *fu) +{ + struct dpu_soc *dpu = fu->dpu; + + switch (fu->id) { + case 0: + case 2: + return 
dpu->vs_priv[0]; + case 1: + case 3: + return dpu->vs_priv[1]; + default: + WARN_ON(1); + } + + return ERR_PTR(-EINVAL); +} +EXPORT_SYMBOL_GPL(fetchdecode_get_vscaler); + +struct dpu_fetchunit *dpu_fd_get(struct dpu_soc *dpu, int id) +{ + struct dpu_fetchunit *fu; + int i; + + for (i = 0; i < ARRAY_SIZE(fd_ids); i++) + if (fd_ids[i] == id) + break; + + if (i == ARRAY_SIZE(fd_ids)) + return ERR_PTR(-EINVAL); + + fu = dpu->fd_priv[i]; + + mutex_lock(&fu->mutex); + + if (fu->inuse) { + mutex_unlock(&fu->mutex); + return ERR_PTR(-EBUSY); + } + + fu->inuse = true; + + mutex_unlock(&fu->mutex); + + return fu; +} +EXPORT_SYMBOL_GPL(dpu_fd_get); + +void dpu_fd_put(struct dpu_fetchunit *fu) +{ + mutex_lock(&fu->mutex); + + fu->inuse = false; + + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(dpu_fd_put); + +static const struct dpu_fetchunit_ops fd_ops = { + .set_burstlength = fetchunit_set_burstlength, + .set_baseaddress = fetchdecode_set_baseaddress, + .set_src_bpp = fetchdecode_set_src_bpp, + .set_src_stride = fetchdecode_set_src_stride, + .set_src_buf_dimensions = fetchdecode_set_src_buf_dimensions, + .set_fmt = fetchdecode_set_fmt, + .enable_src_buf = fetchdecode_enable_src_buf, + .disable_src_buf = fetchdecode_disable_src_buf, + .is_enabled = fetchdecode_is_enabled, + .set_framedimensions = fetchdecode_set_framedimensions, + .set_controltrigger = fetchdecode_set_controltrigger, + .get_stream_id = fetchunit_get_stream_id, + .set_stream_id = fetchunit_set_stream_id, + .pin_off = fetchunit_pin_off, + .unpin_off = fetchunit_unpin_off, + .is_pinned_off = fetchunit_is_pinned_off, +}; + +void _dpu_fd_init(struct dpu_soc *dpu, unsigned int id) +{ + struct dpu_fetchunit *fu; + int i; + + for (i = 0; i < ARRAY_SIZE(fd_ids); i++) + if (fd_ids[i] == id) + break; + + if (WARN_ON(i == ARRAY_SIZE(fd_ids))) + return; + + fu = dpu->fd_priv[i]; + + fetchdecode_pixengcfg_dynamic_src_sel(fu, FD_SRC_DISABLE); + fetchunit_baddr_autoupdate(fu, 0x0); + fetchunit_shden(fu, true); + + 
mutex_lock(&fu->mutex); + dpu_fu_write(fu, SETNUMBUFFERS(16) | SETBURSTLENGTH(16), + BURSTBUFFERMANAGEMENT); + mutex_unlock(&fu->mutex); +} + +int dpu_fd_init(struct dpu_soc *dpu, unsigned int id, + unsigned long pec_base, unsigned long base) +{ + struct dpu_fetchdecode *fd; + struct dpu_fetchunit *fu; + int ret, i; + + fd = devm_kzalloc(dpu->dev, sizeof(*fd), GFP_KERNEL); + if (!fd) + return -ENOMEM; + + fu = &fd->fu; + dpu->fd_priv[id] = fu; + + fu->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_16); + if (!fu->pec_base) + return -ENOMEM; + + fu->base = devm_ioremap(dpu->dev, base, SZ_1K); + if (!fu->base) + return -ENOMEM; + + fu->dpu = dpu; + fu->id = id; + fu->type = FU_T_FD; + fu->ops = &fd_ops; + fu->name = "fetchdecode"; + for (i = 0; i < ARRAY_SIZE(fd_ids); i++) { + if (fd_ids[i] == id) { + fd->shdlreq = fd_shdlreqs[i]; + break; + } + } + mutex_init(&fu->mutex); + + ret = fetchdecode_pixengcfg_dynamic_src_sel(fu, FD_SRC_DISABLE); + if (ret < 0) + return ret; + + ret = fetchdecode_fetchtype(fu, &fd->fetchtype); + if (ret < 0) + return ret; + + _dpu_fd_init(dpu, id); + + return 0; +} diff --git a/drivers/gpu/imx/dpu/dpu-fetcheco.c b/drivers/gpu/imx/dpu/dpu-fetcheco.c new file mode 100644 index 0000000000000000000000000000000000000000..dc662f358eca4870d674f146640cd3fabfadd858 --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-fetcheco.c @@ -0,0 +1,434 @@ +/* + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <video/dpu.h> +#include "dpu-prv.h" + +#define BASEADDRESS0 0x10 +#define SOURCEBUFFERATTRIBUTES0 0x14 +#define SOURCEBUFFERDIMENSION0 0x18 +#define COLORCOMPONENTBITS0 0x1C +#define COLORCOMPONENTSHIFT0 0x20 +#define LAYEROFFSET0 0x24 +#define CLIPWINDOWOFFSET0 0x28 +#define CLIPWINDOWDIMENSIONS0 0x2C +#define CONSTANTCOLOR0 0x30 +#define LAYERPROPERTY0 0x34 +#define FRAMEDIMENSIONS 0x38 +#define FRAMERESAMPLING 0x3C +#define CONTROL 0x40 +#define CONTROLTRIGGER 0x44 +#define START 0x48 +#define FETCHTYPE 0x4C +#define BURSTBUFFERPROPERTIES 0x50 +#define HIDDENSTATUS 0x54 + +struct dpu_fetcheco { + struct dpu_fetchunit fu; +}; + +static void +fetcheco_set_src_buf_dimensions(struct dpu_fetchunit *fu, + unsigned int w, unsigned int h, + u32 fmt, bool deinterlace) +{ + int width, height; + u32 val; + + if (deinterlace) { + width = w; + height = h / 2; + } else { + width = dpu_format_plane_width(w, fmt, 1); + height = dpu_format_plane_height(h, fmt, 1); + } + + switch (fmt) { + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + case DRM_FORMAT_NV24: + case DRM_FORMAT_NV42: + break; + default: + WARN(1, "Unsupported FetchEco pixel format 0x%08x\n", fmt); + return; + } + + val = LINEWIDTH(width) | LINECOUNT(height); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, val, SOURCEBUFFERDIMENSION0); + mutex_unlock(&fu->mutex); +} + +static void fetcheco_set_fmt(struct dpu_fetchunit *fu, u32 fmt, bool unused) +{ + u32 val, bits, shift; + int i, hsub, vsub; + unsigned int x, y; + + switch (fmt) { + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + case DRM_FORMAT_NV24: + case DRM_FORMAT_NV42: + break; + default: + WARN(1, "Unsupported FetchEco pixel format 0x%08x\n", fmt); + return; + } + + hsub = dpu_format_horz_chroma_subsampling(fmt); 
+ switch (hsub) { + case 1: + x = 0x4; + break; + case 2: + x = 0x2; + break; + default: + WARN_ON(1); + return; + } + + vsub = dpu_format_vert_chroma_subsampling(fmt); + switch (vsub) { + case 1: + y = 0x4; + break; + case 2: + y = 0x2; + break; + default: + WARN_ON(1); + return; + } + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, FRAMERESAMPLING); + val &= ~(DELTAX_MASK | DELTAY_MASK); + val |= DELTAX(x) | DELTAY(y); + dpu_fu_write(fu, val, FRAMERESAMPLING); + + val = dpu_fu_read(fu, CONTROL); + val &= ~RASTERMODE_MASK; + val |= RASTERMODE(RASTERMODE__NORMAL); + dpu_fu_write(fu, val, CONTROL); + mutex_unlock(&fu->mutex); + + for (i = 0; i < ARRAY_SIZE(dpu_pixel_format_matrix); i++) { + if (dpu_pixel_format_matrix[i].pixel_format == fmt) { + bits = dpu_pixel_format_matrix[i].bits; + shift = dpu_pixel_format_matrix[i].shift; + + bits &= ~Y_BITS_MASK; + shift &= ~Y_SHIFT_MASK; + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, bits, COLORCOMPONENTBITS0); + dpu_fu_write(fu, shift, COLORCOMPONENTSHIFT0); + mutex_unlock(&fu->mutex); + return; + } + } + + WARN_ON(1); +} + +void fetcheco_layeroffset(struct dpu_fetchunit *fu, unsigned int x, + unsigned int y) +{ + u32 val; + + val = LAYERXOFFSET(x) | LAYERYOFFSET(y); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, val, LAYEROFFSET0); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetcheco_layeroffset); + +void fetcheco_clipoffset(struct dpu_fetchunit *fu, unsigned int x, + unsigned int y) +{ + u32 val; + + val = CLIPWINDOWXOFFSET(x) | CLIPWINDOWYOFFSET(y); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, val, CLIPWINDOWOFFSET0); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetcheco_clipoffset); + +void fetcheco_clipdimensions(struct dpu_fetchunit *fu, unsigned int w, + unsigned int h) +{ + u32 val; + + val = CLIPWINDOWWIDTH(w) | CLIPWINDOWHEIGHT(h); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, val, CLIPWINDOWDIMENSIONS0); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetcheco_clipdimensions); + +static void 
+fetcheco_set_framedimensions(struct dpu_fetchunit *fu, + unsigned int w, unsigned int h, + bool deinterlace) +{ + u32 val; + + if (deinterlace) + h /= 2; + + val = FRAMEWIDTH(w) | FRAMEHEIGHT(h); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, val, FRAMEDIMENSIONS); + mutex_unlock(&fu->mutex); +} + +void fetcheco_frameresampling(struct dpu_fetchunit *fu, unsigned int x, + unsigned int y) +{ + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, FRAMERESAMPLING); + val &= ~(DELTAX_MASK | DELTAY_MASK); + val |= DELTAX(x) | DELTAY(y); + dpu_fu_write(fu, val, FRAMERESAMPLING); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetcheco_frameresampling); + +static void fetcheco_set_controltrigger(struct dpu_fetchunit *fu) +{ + mutex_lock(&fu->mutex); + dpu_fu_write(fu, SHDTOKGEN, CONTROLTRIGGER); + mutex_unlock(&fu->mutex); +} + +int fetcheco_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type) +{ + struct dpu_soc *dpu = fu->dpu; + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, FETCHTYPE); + val &= FETCHTYPE_MASK; + mutex_unlock(&fu->mutex); + + switch (val) { + case FETCHTYPE__DECODE: + dev_dbg(dpu->dev, "FetchEco%d with RL and RLAD decoder\n", + fu->id); + break; + case FETCHTYPE__LAYER: + dev_dbg(dpu->dev, "FetchEco%d with fractional " + "plane(8 layers)\n", fu->id); + break; + case FETCHTYPE__WARP: + dev_dbg(dpu->dev, "FetchEco%d with arbitrary warping and " + "fractional plane(8 layers)\n", fu->id); + break; + case FETCHTYPE__ECO: + dev_dbg(dpu->dev, "FetchEco%d with minimum feature set for " + "alpha, chroma and coordinate planes\n", + fu->id); + break; + case FETCHTYPE__PERSP: + dev_dbg(dpu->dev, "FetchEco%d with affine, perspective and " + "arbitrary warping\n", fu->id); + break; + case FETCHTYPE__ROT: + dev_dbg(dpu->dev, "FetchEco%d with affine and arbitrary " + "warping\n", fu->id); + break; + case FETCHTYPE__DECODEL: + dev_dbg(dpu->dev, "FetchEco%d with RL and RLAD decoder, " + "reduced feature set\n", fu->id); + break; + case 
FETCHTYPE__LAYERL: + dev_dbg(dpu->dev, "FetchEco%d with fractional " + "plane(8 layers), reduced feature set\n", + fu->id); + break; + case FETCHTYPE__ROTL: + dev_dbg(dpu->dev, "FetchEco%d with affine and arbitrary " + "warping, reduced feature set\n", fu->id); + break; + default: + dev_warn(dpu->dev, "Invalid fetch type %u for FetchEco%d\n", + val, fu->id); + return -EINVAL; + } + + *type = val; + return 0; +} +EXPORT_SYMBOL_GPL(fetcheco_fetchtype); + +dpu_block_id_t fetcheco_get_block_id(struct dpu_fetchunit *fu) +{ + switch (fu->id) { + case 0: + return ID_FETCHECO0; + case 1: + return ID_FETCHECO1; + case 2: + return ID_FETCHECO2; + case 9: + return ID_FETCHECO9; + default: + WARN_ON(1); + } + + return ID_NONE; +} +EXPORT_SYMBOL_GPL(fetcheco_get_block_id); + +struct dpu_fetchunit *dpu_fe_get(struct dpu_soc *dpu, int id) +{ + struct dpu_fetchunit *fu; + int i; + + for (i = 0; i < ARRAY_SIZE(fe_ids); i++) + if (fe_ids[i] == id) + break; + + if (i == ARRAY_SIZE(fe_ids)) + return ERR_PTR(-EINVAL); + + fu = dpu->fe_priv[i]; + + mutex_lock(&fu->mutex); + + if (fu->inuse) { + mutex_unlock(&fu->mutex); + return ERR_PTR(-EBUSY); + } + + fu->inuse = true; + + mutex_unlock(&fu->mutex); + + return fu; +} +EXPORT_SYMBOL_GPL(dpu_fe_get); + +void dpu_fe_put(struct dpu_fetchunit *fu) +{ + mutex_lock(&fu->mutex); + + fu->inuse = false; + + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(dpu_fe_put); + +static const struct dpu_fetchunit_ops fe_ops = { + .set_burstlength = fetchunit_set_burstlength, + .set_baseaddress = fetchunit_set_baseaddress, + .set_src_bpp = fetchunit_set_src_bpp, + .set_src_stride = fetchunit_set_src_stride, + .set_src_buf_dimensions = fetcheco_set_src_buf_dimensions, + .set_fmt = fetcheco_set_fmt, + .enable_src_buf = fetchunit_enable_src_buf, + .disable_src_buf = fetchunit_disable_src_buf, + .is_enabled = fetchunit_is_enabled, + .set_framedimensions = fetcheco_set_framedimensions, + .set_controltrigger = fetcheco_set_controltrigger, + .get_stream_id = 
fetchunit_get_stream_id, + .set_stream_id = fetchunit_set_stream_id, + .pin_off = fetchunit_pin_off, + .unpin_off = fetchunit_unpin_off, + .is_pinned_off = fetchunit_is_pinned_off, +}; + +void _dpu_fe_init(struct dpu_soc *dpu, unsigned int id) +{ + struct dpu_fetchunit *fu; + int i; + + for (i = 0; i < ARRAY_SIZE(fe_ids); i++) + if (fe_ids[i] == id) + break; + + if (WARN_ON(i == ARRAY_SIZE(fe_ids))) + return; + + fu = dpu->fe_priv[i]; + + fetchunit_shden(fu, true); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, SETNUMBUFFERS(16) | SETBURSTLENGTH(16), + BURSTBUFFERMANAGEMENT); + mutex_unlock(&fu->mutex); +} + +int dpu_fe_init(struct dpu_soc *dpu, unsigned int id, + unsigned long pec_base, unsigned long base) +{ + struct dpu_fetcheco *fe; + struct dpu_fetchunit *fu; + int i; + + fe = devm_kzalloc(dpu->dev, sizeof(*fe), GFP_KERNEL); + if (!fe) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(fe_ids); i++) + if (fe_ids[i] == id) + break; + + if (i == ARRAY_SIZE(fe_ids)) + return -EINVAL; + + fu = &fe->fu; + dpu->fe_priv[i] = fu; + + fu->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_16); + if (!fu->pec_base) + return -ENOMEM; + + fu->base = devm_ioremap(dpu->dev, base, SZ_128); + if (!fu->base) + return -ENOMEM; + + fu->dpu = dpu; + fu->id = id; + fu->type = FU_T_FE; + fu->ops = &fe_ops; + fu->name = "fetcheco"; + + mutex_init(&fu->mutex); + + _dpu_fe_init(dpu, id); + + return 0; +} diff --git a/drivers/gpu/imx/dpu/dpu-fetchlayer.c b/drivers/gpu/imx/dpu/dpu-fetchlayer.c new file mode 100644 index 0000000000000000000000000000000000000000..aedbab16212c792a758424546cf6c1577a1b763c --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-fetchlayer.c @@ -0,0 +1,330 @@ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. 
+ * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <video/dpu.h> +#include "dpu-prv.h" + +#define PIXENGCFG_STATUS 0x8 +#define BASEADDRESS(n) (0x10 + (n) * 0x28) +#define SOURCEBUFFERATTRIBUTES(n) (0x14 + (n) * 0x28) +#define SOURCEBUFFERDIMENSION(n) (0x18 + (n) * 0x28) +#define COLORCOMPONENTBITS(n) (0x1C + (n) * 0x28) +#define COLORCOMPONENTSHIFT(n) (0x20 + (n) * 0x28) +#define LAYEROFFSET(n) (0x24 + (n) * 0x28) +#define CLIPWINDOWOFFSET(n) (0x28 + (n) * 0x28) +#define CLIPWINDOWDIMENSIONS(n) (0x2C + (n) * 0x28) +#define CONSTANTCOLOR(n) (0x30 + (n) * 0x28) +#define LAYERPROPERTY(n) (0x34 + (n) * 0x28) +#define FRAMEDIMENSIONS 0x150 +#define FRAMERESAMPLING 0x154 +#define CONTROL 0x158 +#define TRIGGERENABLE 0x15C +#define SHDLDREQ(lm) ((lm) & 0xFF) +#define CONTROLTRIGGER 0x160 +#define START 0x164 +#define FETCHTYPE 0x168 +#define BURSTBUFFERPROPERTIES 0x16C +#define STATUS 0x170 +#define HIDDENSTATUS 0x174 + +static const shadow_load_req_t fl_shdlreqs[] = { + SHLDREQID_FETCHLAYER0, SHLDREQID_FETCHLAYER1, +}; + +struct dpu_fetchlayer { + struct dpu_fetchunit fu; + fetchtype_t fetchtype; + shadow_load_req_t shdlreq; +}; + +static void +fetchlayer_set_src_buf_dimensions(struct dpu_fetchunit *fu, + unsigned int w, unsigned int h, + u32 unused1, bool unused2) +{ + u32 val; + + val = LINEWIDTH(w) | LINECOUNT(h); + + 
mutex_lock(&fu->mutex); + dpu_fu_write(fu, val, SOURCEBUFFERDIMENSION(fu->sub_id)); + mutex_unlock(&fu->mutex); +} + +static void fetchlayer_set_fmt(struct dpu_fetchunit *fu, u32 fmt, bool unused) +{ + u32 val, bits, shift; + int i, sub_id = fu->sub_id; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, LAYERPROPERTY(sub_id)); + val &= ~YUVCONVERSIONMODE_MASK; + val |= YUVCONVERSIONMODE(YUVCONVERSIONMODE__OFF); + dpu_fu_write(fu, val, LAYERPROPERTY(sub_id)); + mutex_unlock(&fu->mutex); + + for (i = 0; i < ARRAY_SIZE(dpu_pixel_format_matrix); i++) { + if (dpu_pixel_format_matrix[i].pixel_format == fmt) { + bits = dpu_pixel_format_matrix[i].bits; + shift = dpu_pixel_format_matrix[i].shift; + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, bits, COLORCOMPONENTBITS(sub_id)); + dpu_fu_write(fu, shift, COLORCOMPONENTSHIFT(sub_id)); + mutex_unlock(&fu->mutex); + return; + } + } + + WARN_ON(1); +} + +static void +fetchlayer_set_framedimensions(struct dpu_fetchunit *fu, unsigned int w, + unsigned int h, bool unused) +{ + u32 val; + + val = FRAMEWIDTH(w) | FRAMEHEIGHT(h); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, val, FRAMEDIMENSIONS); + mutex_unlock(&fu->mutex); +} + +void fetchlayer_rgb_constantcolor(struct dpu_fetchunit *fu, + u8 r, u8 g, u8 b, u8 a) +{ + u32 val; + + val = rgb_color(r, g, b, a); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, val, CONSTANTCOLOR(fu->id)); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchlayer_rgb_constantcolor); + +void fetchlayer_yuv_constantcolor(struct dpu_fetchunit *fu, u8 y, u8 u, u8 v) +{ + u32 val; + + val = yuv_color(y, u, v); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, val, CONSTANTCOLOR(fu->id)); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchlayer_yuv_constantcolor); + +static void fetchlayer_set_controltrigger(struct dpu_fetchunit *fu) +{ + mutex_lock(&fu->mutex); + dpu_fu_write(fu, SHDTOKGEN, CONTROLTRIGGER); + mutex_unlock(&fu->mutex); +} + +int fetchlayer_fetchtype(struct dpu_fetchunit *fu, fetchtype_t 
*type) +{ + struct dpu_soc *dpu = fu->dpu; + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, FETCHTYPE); + val &= FETCHTYPE_MASK; + mutex_unlock(&fu->mutex); + + switch (val) { + case FETCHTYPE__DECODE: + dev_dbg(dpu->dev, "FetchLayer%d with RL and RLAD decoder\n", + fu->id); + break; + case FETCHTYPE__LAYER: + dev_dbg(dpu->dev, "FetchLayer%d with fractional " + "plane(8 layers)\n", fu->id); + break; + case FETCHTYPE__WARP: + dev_dbg(dpu->dev, "FetchLayer%d with arbitrary warping and " + "fractional plane(8 layers)\n", fu->id); + break; + case FETCHTYPE__ECO: + dev_dbg(dpu->dev, "FetchLayer%d with minimum feature set for " + "alpha, chroma and coordinate planes\n", + fu->id); + break; + case FETCHTYPE__PERSP: + dev_dbg(dpu->dev, "FetchLayer%d with affine, perspective and " + "arbitrary warping\n", fu->id); + break; + case FETCHTYPE__ROT: + dev_dbg(dpu->dev, "FetchLayer%d with affine and arbitrary " + "warping\n", fu->id); + break; + case FETCHTYPE__DECODEL: + dev_dbg(dpu->dev, "FetchLayer%d with RL and RLAD decoder, " + "reduced feature set\n", fu->id); + break; + case FETCHTYPE__LAYERL: + dev_dbg(dpu->dev, "FetchLayer%d with fractional " + "plane(8 layers), reduced feature set\n", + fu->id); + break; + case FETCHTYPE__ROTL: + dev_dbg(dpu->dev, "FetchLayer%d with affine and arbitrary " + "warping, reduced feature set\n", fu->id); + break; + default: + dev_warn(dpu->dev, "Invalid fetch type %u for FetchLayer%d\n", + val, fu->id); + return -EINVAL; + } + + *type = val; + return 0; +} +EXPORT_SYMBOL_GPL(fetchlayer_fetchtype); + +struct dpu_fetchunit *dpu_fl_get(struct dpu_soc *dpu, int id) +{ + struct dpu_fetchunit *fu; + int i; + + for (i = 0; i < ARRAY_SIZE(fl_ids); i++) + if (fl_ids[i] == id) + break; + + if (i == ARRAY_SIZE(fl_ids)) + return ERR_PTR(-EINVAL); + + fu = dpu->fl_priv[i]; + + mutex_lock(&fu->mutex); + + if (fu->inuse) { + mutex_unlock(&fu->mutex); + return ERR_PTR(-EBUSY); + } + + fu->inuse = true; + + mutex_unlock(&fu->mutex); + + return 
fu;
+}
+EXPORT_SYMBOL_GPL(dpu_fl_get);
+
+void dpu_fl_put(struct dpu_fetchunit *fu)
+{
+	mutex_lock(&fu->mutex);
+
+	fu->inuse = false;
+
+	mutex_unlock(&fu->mutex);
+}
+EXPORT_SYMBOL_GPL(dpu_fl_put);
+
+static const struct dpu_fetchunit_ops fl_ops = {
+	.set_burstlength	= fetchunit_set_burstlength,
+	.set_baseaddress	= fetchunit_set_baseaddress,
+	.set_src_bpp		= fetchunit_set_src_bpp,
+	.set_src_stride		= fetchunit_set_src_stride,
+	.set_src_buf_dimensions	= fetchlayer_set_src_buf_dimensions,
+	.set_fmt		= fetchlayer_set_fmt,
+	.enable_src_buf		= fetchunit_enable_src_buf,
+	.disable_src_buf	= fetchunit_disable_src_buf,
+	.is_enabled		= fetchunit_is_enabled,
+	.set_framedimensions	= fetchlayer_set_framedimensions,
+	.set_controltrigger	= fetchlayer_set_controltrigger,
+	.get_stream_id		= fetchunit_get_stream_id,
+	.set_stream_id		= fetchunit_set_stream_id,
+	.pin_off		= fetchunit_pin_off,
+	.unpin_off		= fetchunit_unpin_off,
+	.is_pinned_off		= fetchunit_is_pinned_off,
+};
+
+void _dpu_fl_init(struct dpu_soc *dpu, unsigned int id)
+{
+	struct dpu_fetchunit *fu;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(fl_ids); i++)
+		if (fl_ids[i] == id)
+			break;
+
+	if (WARN_ON(i == ARRAY_SIZE(fl_ids)))
+		return;
+
+	fu = dpu->fl_priv[i];
+
+	fetchunit_baddr_autoupdate(fu, 0x0);
+	fetchunit_shden(fu, true);
+	fetchunit_shdldreq_sticky(fu, 0xFF);
+	fetchunit_disable_src_buf(fu);
+
+	mutex_lock(&fu->mutex);
+	dpu_fu_write(fu, SETNUMBUFFERS(16) | SETBURSTLENGTH(16),
+		     BURSTBUFFERMANAGEMENT);
+	mutex_unlock(&fu->mutex);
+}
+
+int dpu_fl_init(struct dpu_soc *dpu, unsigned int id,
+		unsigned long pec_base, unsigned long base)
+{
+	struct dpu_fetchlayer *fl;
+	struct dpu_fetchunit *fu;
+	int ret, i;
+
+	fl = devm_kzalloc(dpu->dev, sizeof(*fl), GFP_KERNEL);
+	if (!fl)
+		return -ENOMEM;
+
+	fu = &fl->fu;
+	dpu->fl_priv[id] = fu;
+
+	fu->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_16); /* fix: was 'base'; PEC registers live at pec_base, as in dpu_fd_init()/dpu_fe_init() */
+	if (!fu->pec_base)
+		return -ENOMEM;
+
+	fu->base = devm_ioremap(dpu->dev, base, SZ_512);
+	if (!fu->base)
+		return
-ENOMEM; + + fu->dpu = dpu; + fu->id = id; + fu->sub_id = 0; + fu->type = FU_T_FL; + fu->ops = &fl_ops; + fu->name = "fetchlayer"; + for (i = 0; i < ARRAY_SIZE(fl_ids); i++) { + if (fl_ids[i] == id) { + fl->shdlreq = fl_shdlreqs[i]; + break; + } + } + mutex_init(&fu->mutex); + + ret = fetchlayer_fetchtype(fu, &fl->fetchtype); + if (ret < 0) + return ret; + + _dpu_fl_init(dpu, id); + + return 0; +} diff --git a/drivers/gpu/imx/dpu/dpu-fetchunit.c b/drivers/gpu/imx/dpu/dpu-fetchunit.c new file mode 100644 index 0000000000000000000000000000000000000000..cae8ff05e43dd54c00be7e69cf5eff9d9dad8b52 --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-fetchunit.c @@ -0,0 +1,339 @@ +/* + * Copyright 2018-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include <video/dpu.h> +#include "dpu-prv.h" + +#define BASEADDRESS(n) (0x10 + (n) * 0x28) +#define SOURCEBUFFERATTRIBUTES(n) (0x14 + (n) * 0x28) +#define SOURCEBUFFERDIMENSION(n) (0x18 + (n) * 0x28) +#define COLORCOMPONENTBITS(n) (0x1C + (n) * 0x28) +#define COLORCOMPONENTSHIFT(n) (0x20 + (n) * 0x28) +#define LAYEROFFSET(n) (0x24 + (n) * 0x28) +#define CLIPWINDOWOFFSET(n) (0x28 + (n) * 0x28) +#define CLIPWINDOWDIMENSIONS(n) (0x2C + (n) * 0x28) +#define CONSTANTCOLOR(n) (0x30 + (n) * 0x28) +#define LAYERPROPERTY(n) (0x34 + (n) * 0x28) + +void fetchunit_get_dprc(struct dpu_fetchunit *fu, void *data) +{ + if (WARN_ON(!fu)) + return; + + fu->dprc = data; +} +EXPORT_SYMBOL_GPL(fetchunit_get_dprc); + +void fetchunit_shden(struct dpu_fetchunit *fu, bool enable) +{ + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, STATICCONTROL); + if (enable) + val |= SHDEN; + else + val &= ~SHDEN; + dpu_fu_write(fu, val, STATICCONTROL); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchunit_shden); + +void fetchunit_baddr_autoupdate(struct dpu_fetchunit *fu, u8 layer_mask) +{ + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, STATICCONTROL); + val &= ~BASEADDRESSAUTOUPDATE_MASK; + val |= BASEADDRESSAUTOUPDATE(layer_mask); + dpu_fu_write(fu, val, STATICCONTROL); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchunit_baddr_autoupdate); + +void fetchunit_shdldreq_sticky(struct dpu_fetchunit *fu, u8 layer_mask) +{ + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, STATICCONTROL); + val &= ~SHDLDREQSTICKY_MASK; + val |= SHDLDREQSTICKY(layer_mask); + dpu_fu_write(fu, val, STATICCONTROL); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchunit_shdldreq_sticky); + +void fetchunit_set_burstlength(struct dpu_fetchunit *fu, + unsigned int x_offset, unsigned int mt_w, + int bpp, dma_addr_t baddr, bool use_prefetch) +{ + struct dpu_soc *dpu = fu->dpu; + unsigned int burst_size, burst_length; + bool nonzero_mod = !!mt_w; + u32 val; + + if 
(use_prefetch) { + /* consider PRG x offset to calculate buffer address */ + if (nonzero_mod) + baddr += (x_offset % mt_w) * (bpp / 8); + + /* + * address TKT343664: + * fetch unit base address has to align to burst size + */ + burst_size = 1 << (ffs(baddr) - 1); + burst_size = round_up(burst_size, 8); + burst_size = min(burst_size, 128U); + burst_length = burst_size / 8; + } else { + burst_length = 16; + } + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, BURSTBUFFERMANAGEMENT); + val &= ~SETBURSTLENGTH_MASK; + val |= SETBURSTLENGTH(burst_length); + dpu_fu_write(fu, val, BURSTBUFFERMANAGEMENT); + mutex_unlock(&fu->mutex); + + dev_dbg(dpu->dev, "%s%d burst length is %u\n", + fu->name, fu->id, burst_length); +} +EXPORT_SYMBOL_GPL(fetchunit_set_burstlength); + +void fetchunit_set_baseaddress(struct dpu_fetchunit *fu, unsigned int width, + unsigned int x_offset, unsigned int y_offset, + unsigned int mt_w, unsigned int mt_h, + int bpp, dma_addr_t baddr) +{ + unsigned int burst_size, stride; + bool nonzero_mod = !!mt_w; + + if (nonzero_mod) { + /* consider PRG x offset to calculate buffer address */ + baddr += (x_offset % mt_w) * (bpp / 8); + + /* + * address TKT343664: + * fetch unit base address has to align to burst size + */ + burst_size = 1 << (ffs(baddr) - 1); + burst_size = round_up(burst_size, 8); + burst_size = min(burst_size, 128U); + + stride = width * (bpp >> 3); + /* + * address TKT339017: + * fixup for burst size vs stride mismatch + */ + stride = round_up(stride + round_up(baddr % 8, 8), burst_size); + + /* consider PRG y offset to calculate buffer address */ + baddr += (y_offset % mt_h) * stride; + } + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, baddr, BASEADDRESS(fu->sub_id)); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchunit_set_baseaddress); + +void fetchunit_set_src_bpp(struct dpu_fetchunit *fu, int bpp) +{ + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, SOURCEBUFFERATTRIBUTES(fu->sub_id)); + val &= ~0x3f0000; + val |= 
BITSPERPIXEL(bpp); + dpu_fu_write(fu, val, SOURCEBUFFERATTRIBUTES(fu->sub_id)); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchunit_set_src_bpp); + +/* + * The arguments width and bpp are valid only when use_prefetch is true. + * For fetcheco, since the pixel format has to be NV12 or NV21 when + * use_prefetch is true, we assume width stands for how many UV we have + * in bytes for one line, while bpp should be 8bits for every U or V component. + */ +void fetchunit_set_src_stride(struct dpu_fetchunit *fu, + unsigned int width, unsigned int x_offset, + unsigned int mt_w, int bpp, unsigned int stride, + dma_addr_t baddr, bool use_prefetch) +{ + unsigned int burst_size; + bool nonzero_mod = !!mt_w; + u32 val; + + if (use_prefetch) { + /* consider PRG x offset to calculate buffer address */ + if (nonzero_mod) + baddr += (x_offset % mt_w) * (bpp / 8); + + /* + * address TKT343664: + * fetch unit base address has to align to burst size + */ + burst_size = 1 << (ffs(baddr) - 1); + burst_size = round_up(burst_size, 8); + burst_size = min(burst_size, 128U); + + stride = width * (bpp >> 3); + /* + * address TKT339017: + * fixup for burst size vs stride mismatch + */ + if (nonzero_mod) + stride = round_up(stride + round_up(baddr % 8, 8), + burst_size); + else + stride = round_up(stride, burst_size); + } + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, SOURCEBUFFERATTRIBUTES(fu->sub_id)); + val &= ~0xffff; + val |= STRIDE(stride); + dpu_fu_write(fu, val, SOURCEBUFFERATTRIBUTES(fu->sub_id)); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchunit_set_src_stride); + +void fetchunit_enable_src_buf(struct dpu_fetchunit *fu) +{ + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, LAYERPROPERTY(fu->sub_id)); + val |= SOURCEBUFFERENABLE; + dpu_fu_write(fu, val, LAYERPROPERTY(fu->sub_id)); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchunit_enable_src_buf); + +void fetchunit_disable_src_buf(struct dpu_fetchunit *fu) +{ + u32 val; + + 
mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, LAYERPROPERTY(fu->sub_id)); + val &= ~SOURCEBUFFERENABLE; + dpu_fu_write(fu, val, LAYERPROPERTY(fu->sub_id)); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchunit_disable_src_buf); + +bool fetchunit_is_enabled(struct dpu_fetchunit *fu) +{ + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, LAYERPROPERTY(fu->sub_id)); + mutex_unlock(&fu->mutex); + + return !!(val & SOURCEBUFFERENABLE); +} +EXPORT_SYMBOL_GPL(fetchunit_is_enabled); + +unsigned int fetchunit_get_stream_id(struct dpu_fetchunit *fu) +{ + if (WARN_ON(!fu)) + return DPU_PLANE_SRC_DISABLED; + + return fu->stream_id; +} +EXPORT_SYMBOL_GPL(fetchunit_get_stream_id); + +void fetchunit_set_stream_id(struct dpu_fetchunit *fu, unsigned int id) +{ + if (WARN_ON(!fu)) + return; + + switch (id) { + case DPU_PLANE_SRC_TO_DISP_STREAM0: + case DPU_PLANE_SRC_TO_DISP_STREAM1: + case DPU_PLANE_SRC_DISABLED: + fu->stream_id = id; + break; + default: + WARN_ON(1); + } +} +EXPORT_SYMBOL_GPL(fetchunit_set_stream_id); + +void fetchunit_pin_off(struct dpu_fetchunit *fu) +{ + if (WARN_ON(!fu)) + return; + + fu->pin_off = true; +} +EXPORT_SYMBOL_GPL(fetchunit_pin_off); + +void fetchunit_unpin_off(struct dpu_fetchunit *fu) +{ + if (WARN_ON(!fu)) + return; + + fu->pin_off = false; +} +EXPORT_SYMBOL_GPL(fetchunit_unpin_off); + +bool fetchunit_is_pinned_off(struct dpu_fetchunit *fu) +{ + if (WARN_ON(!fu)) + return false; + + return fu->pin_off; +} +EXPORT_SYMBOL_GPL(fetchunit_is_pinned_off); + +bool fetchunit_is_fetchdecode(struct dpu_fetchunit *fu) +{ + if (WARN_ON(!fu)) + return false; + + return fu->type == FU_T_FD; +} +EXPORT_SYMBOL_GPL(fetchunit_is_fetchdecode); + +bool fetchunit_is_fetcheco(struct dpu_fetchunit *fu) +{ + if (WARN_ON(!fu)) + return false; + + return fu->type == FU_T_FE; +} +EXPORT_SYMBOL_GPL(fetchunit_is_fetcheco); + +bool fetchunit_is_fetchlayer(struct dpu_fetchunit *fu) +{ + if (WARN_ON(!fu)) + return false; + + return fu->type == FU_T_FL; +} 
+EXPORT_SYMBOL_GPL(fetchunit_is_fetchlayer); + +bool fetchunit_is_fetchwarp(struct dpu_fetchunit *fu) +{ + if (WARN_ON(!fu)) + return false; + + return fu->type == FU_T_FW; +} +EXPORT_SYMBOL_GPL(fetchunit_is_fetchwarp); diff --git a/drivers/gpu/imx/dpu/dpu-fetchwarp.c b/drivers/gpu/imx/dpu/dpu-fetchwarp.c new file mode 100644 index 0000000000000000000000000000000000000000..fc9eb7047aa6254127dbeac7c4cd0d8d94feb2f4 --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-fetchwarp.c @@ -0,0 +1,332 @@ +/* + * Copyright 2018-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <video/dpu.h> +#include "dpu-prv.h" + +#define PIXENGCFG_STATUS 0x8 +#define BASEADDRESS(n) (0x10 + (n) * 0x28) +#define SOURCEBUFFERATTRIBUTES(n) (0x14 + (n) * 0x28) +#define SOURCEBUFFERDIMENSION(n) (0x18 + (n) * 0x28) +#define COLORCOMPONENTBITS(n) (0x1C + (n) * 0x28) +#define COLORCOMPONENTSHIFT(n) (0x20 + (n) * 0x28) +#define LAYEROFFSET(n) (0x24 + (n) * 0x28) +#define CLIPWINDOWOFFSET(n) (0x28 + (n) * 0x28) +#define CLIPWINDOWDIMENSIONS(n) (0x2C + (n) * 0x28) +#define CONSTANTCOLOR(n) (0x30 + (n) * 0x28) +#define LAYERPROPERTY(n) (0x34 + (n) * 0x28) +#define FRAMEDIMENSIONS 0x150 +#define FRAMERESAMPLING 0x154 +#define WARPCONTROL 0x158 +#define ARBSTARTX 0x15c +#define ARBSTARTY 0x160 +#define ARBDELTA 0x164 +#define FIRPOSITIONS 0x168 +#define FIRCOEFFICIENTS 0x16c +#define CONTROL 0x170 +#define TRIGGERENABLE 0x174 +#define SHDLDREQ(lm) ((lm) & 0xFF) +#define CONTROLTRIGGER 0x178 +#define START 0x17c +#define FETCHTYPE 0x180 +#define BURSTBUFFERPROPERTIES 0x184 +#define STATUS 0x188 +#define HIDDENSTATUS 0x18c + +struct dpu_fetchwarp { + struct dpu_fetchunit fu; + fetchtype_t fetchtype; +}; + +static void +fetchwarp_set_src_buf_dimensions(struct dpu_fetchunit *fu, + unsigned int w, unsigned int h, + u32 unused1, bool unused2) +{ + u32 val; + + val = LINEWIDTH(w) | LINECOUNT(h); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, val, SOURCEBUFFERDIMENSION(fu->sub_id)); + mutex_unlock(&fu->mutex); +} + +static void fetchwarp_set_fmt(struct dpu_fetchunit *fu, + u32 fmt, bool unused) +{ + u32 val, bits, shift; + int i, sub_id = fu->sub_id; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, LAYERPROPERTY(sub_id)); + val &= ~YUVCONVERSIONMODE_MASK; + dpu_fu_write(fu, val, LAYERPROPERTY(sub_id)); + mutex_unlock(&fu->mutex); + + for (i = 0; i < ARRAY_SIZE(dpu_pixel_format_matrix); i++) { + if 
(dpu_pixel_format_matrix[i].pixel_format == fmt) { + bits = dpu_pixel_format_matrix[i].bits; + shift = dpu_pixel_format_matrix[i].shift; + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, bits, COLORCOMPONENTBITS(sub_id)); + dpu_fu_write(fu, shift, COLORCOMPONENTSHIFT(sub_id)); + mutex_unlock(&fu->mutex); + return; + } + } + + WARN_ON(1); +} + +static void +fetchwarp_set_framedimensions(struct dpu_fetchunit *fu, + unsigned int w, unsigned int h, bool unused) +{ + u32 val; + + val = FRAMEWIDTH(w) | FRAMEHEIGHT(h); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, val, FRAMEDIMENSIONS); + mutex_unlock(&fu->mutex); +} + +void fetchwarp_rgb_constantcolor(struct dpu_fetchunit *fu, + u8 r, u8 g, u8 b, u8 a) +{ + u32 val; + + val = rgb_color(r, g, b, a); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, val, CONSTANTCOLOR(fu->id)); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchwarp_rgb_constantcolor); + +void fetchwarp_yuv_constantcolor(struct dpu_fetchunit *fu, u8 y, u8 u, u8 v) +{ + u32 val; + + val = yuv_color(y, u, v); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, val, CONSTANTCOLOR(fu->id)); + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(fetchwarp_yuv_constantcolor); + +static void fetchwarp_set_controltrigger(struct dpu_fetchunit *fu) +{ + mutex_lock(&fu->mutex); + dpu_fu_write(fu, SHDTOKGEN, CONTROLTRIGGER); + mutex_unlock(&fu->mutex); +} + +int fetchwarp_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type) +{ + struct dpu_soc *dpu = fu->dpu; + u32 val; + + mutex_lock(&fu->mutex); + val = dpu_fu_read(fu, FETCHTYPE); + val &= FETCHTYPE_MASK; + mutex_unlock(&fu->mutex); + + switch (val) { + case FETCHTYPE__DECODE: + dev_dbg(dpu->dev, "FetchWarp%d with RL and RLAD decoder\n", + fu->id); + break; + case FETCHTYPE__LAYER: + dev_dbg(dpu->dev, "FetchWarp%d with fractional " + "plane(8 layers)\n", fu->id); + break; + case FETCHTYPE__WARP: + dev_dbg(dpu->dev, "FetchWarp%d with arbitrary warping and " + "fractional plane(8 layers)\n", fu->id); + break; + case 
FETCHTYPE__ECO: + dev_dbg(dpu->dev, "FetchWarp%d with minimum feature set for " + "alpha, chroma and coordinate planes\n", + fu->id); + break; + case FETCHTYPE__PERSP: + dev_dbg(dpu->dev, "FetchWarp%d with affine, perspective and " + "arbitrary warping\n", fu->id); + break; + case FETCHTYPE__ROT: + dev_dbg(dpu->dev, "FetchWarp%d with affine and arbitrary " + "warping\n", fu->id); + break; + case FETCHTYPE__DECODEL: + dev_dbg(dpu->dev, "FetchWarp%d with RL and RLAD decoder, " + "reduced feature set\n", fu->id); + break; + case FETCHTYPE__LAYERL: + dev_dbg(dpu->dev, "FetchWarp%d with fractional " + "plane(8 layers), reduced feature set\n", + fu->id); + break; + case FETCHTYPE__ROTL: + dev_dbg(dpu->dev, "FetchWarp%d with affine and arbitrary " + "warping, reduced feature set\n", fu->id); + break; + default: + dev_warn(dpu->dev, "Invalid fetch type %u for FetchWarp%d\n", + val, fu->id); + return -EINVAL; + } + + *type = val; + return 0; +} +EXPORT_SYMBOL_GPL(fetchwarp_fetchtype); + +struct dpu_fetchunit *dpu_fw_get(struct dpu_soc *dpu, int id) +{ + struct dpu_fetchunit *fu; + int i; + + for (i = 0; i < ARRAY_SIZE(fw_ids); i++) + if (fw_ids[i] == id) + break; + + if (i == ARRAY_SIZE(fw_ids)) + return ERR_PTR(-EINVAL); + + fu = dpu->fw_priv[i]; + + mutex_lock(&fu->mutex); + + if (fu->inuse) { + mutex_unlock(&fu->mutex); + return ERR_PTR(-EBUSY); + } + + fu->inuse = true; + + mutex_unlock(&fu->mutex); + + return fu; +} +EXPORT_SYMBOL_GPL(dpu_fw_get); + +void dpu_fw_put(struct dpu_fetchunit *fu) +{ + mutex_lock(&fu->mutex); + + fu->inuse = false; + + mutex_unlock(&fu->mutex); +} +EXPORT_SYMBOL_GPL(dpu_fw_put); + +static const struct dpu_fetchunit_ops fw_ops = { + .set_burstlength = fetchunit_set_burstlength, + .set_baseaddress = fetchunit_set_baseaddress, + .set_src_bpp = fetchunit_set_src_bpp, + .set_src_stride = fetchunit_set_src_stride, + .set_src_buf_dimensions = fetchwarp_set_src_buf_dimensions, + .set_fmt = fetchwarp_set_fmt, + .enable_src_buf = 
fetchunit_enable_src_buf, + .disable_src_buf = fetchunit_disable_src_buf, + .is_enabled = fetchunit_is_enabled, + .set_framedimensions = fetchwarp_set_framedimensions, + .set_controltrigger = fetchwarp_set_controltrigger, + .get_stream_id = fetchunit_get_stream_id, + .set_stream_id = fetchunit_set_stream_id, + .pin_off = fetchunit_pin_off, + .unpin_off = fetchunit_unpin_off, + .is_pinned_off = fetchunit_is_pinned_off, +}; + +void _dpu_fw_init(struct dpu_soc *dpu, unsigned int id) +{ + struct dpu_fetchunit *fu; + int i; + + for (i = 0; i < ARRAY_SIZE(fw_ids); i++) + if (fw_ids[i] == id) + break; + + if (WARN_ON(i == ARRAY_SIZE(fw_ids))) + return; + + fu = dpu->fw_priv[i]; + + fetchunit_baddr_autoupdate(fu, 0x0); + fetchunit_shden(fu, true); + fetchunit_shdldreq_sticky(fu, 0xFF); + fetchunit_disable_src_buf(fu); + + mutex_lock(&fu->mutex); + dpu_fu_write(fu, SETNUMBUFFERS(16) | SETBURSTLENGTH(16), + BURSTBUFFERMANAGEMENT); + mutex_unlock(&fu->mutex); +} + +int dpu_fw_init(struct dpu_soc *dpu, unsigned int id, + unsigned long pec_base, unsigned long base) +{ + struct dpu_fetchwarp *fw; + struct dpu_fetchunit *fu; + int i, ret; + + fw = devm_kzalloc(dpu->dev, sizeof(*fw), GFP_KERNEL); + if (!fw) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(fw_ids); i++) + if (fw_ids[i] == id) + break; + + if (i == ARRAY_SIZE(fw_ids)) + return -EINVAL; + + fu = &fw->fu; + dpu->fw_priv[i] = fu; + + fu->pec_base = devm_ioremap(dpu->dev, base, SZ_16); + if (!fu->pec_base) + return -ENOMEM; + + fu->base = devm_ioremap(dpu->dev, base, SZ_512); + if (!fu->base) + return -ENOMEM; + + fu->dpu = dpu; + fu->id = id; + fu->sub_id = 0; + fu->type = FU_T_FW; + fu->ops = &fw_ops; + fu->name = "fetchwarp"; + + mutex_init(&fu->mutex); + + ret = fetchwarp_fetchtype(fu, &fw->fetchtype); + if (ret < 0) + return ret; + + _dpu_fw_init(dpu, id); + + return 0; +} diff --git a/drivers/gpu/imx/dpu/dpu-framegen.c b/drivers/gpu/imx/dpu/dpu-framegen.c new file mode 100644 index 
0000000000000000000000000000000000000000..994d6ac0c0e5868d5ad609ee724747e2c754c303 --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-framegen.c @@ -0,0 +1,778 @@ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <soc/imx8/sc/sci.h> +#include <video/dpu.h> +#include "dpu-prv.h" + +#define FGSTCTRL 0x8 +#define FGSYNCMODE_MASK 0x6 +#define HTCFG1 0xC +#define HTOTAL(n) ((((n) - 1) & 0x3FFF) << 16) +#define HACT(n) ((n) & 0x3FFF) +#define HTCFG2 0x10 +#define HSEN BIT(31) +#define HSBP(n) ((((n) - 1) & 0x3FFF) << 16) +#define HSYNC(n) (((n) - 1) & 0x3FFF) +#define VTCFG1 0x14 +#define VTOTAL(n) ((((n) - 1) & 0x3FFF) << 16) +#define VACT(n) ((n) & 0x3FFF) +#define VTCFG2 0x18 +#define VSEN BIT(31) +#define VSBP(n) ((((n) - 1) & 0x3FFF) << 16) +#define VSYNC(n) (((n) - 1) & 0x3FFF) +#define INTCONFIG(n) (0x1C + 4 * (n)) +#define EN BIT(31) +#define ROW(n) (((n) & 0x3FFF) << 16) +#define COL(n) ((n) & 0x3FFF) +#define PKICKCONFIG 0x2C +#define SKICKCONFIG 0x30 +#define SECSTATCONFIG 0x34 +#define FGSRCR1 0x38 +#define FGSRCR2 0x3C +#define FGSRCR3 0x40 +#define FGSRCR4 0x44 +#define FGSRCR5 0x48 +#define FGSRCR6 0x4C +#define FGKSDR 0x50 +#define PACFG 0x54 +#define STARTX(n) (((n) + 1) & 0x3FFF) +#define STARTY(n) (((((n) + 1) & 0x3FFF)) << 16) +#define SACFG 
0x58 +#define FGINCTRL 0x5C +#define FGDM_MASK 0x7 +#define ENPRIMALPHA BIT(3) +#define ENSECALPHA BIT(4) +#define FGINCTRLPANIC 0x60 +#define FGCCR 0x64 +#define CCALPHA(a) (((a) & 0x1) << 30) +#define CCRED(r) (((r) & 0x3FF) << 20) +#define CCGREEN(g) (((g) & 0x3FF) << 10) +#define CCBLUE(b) ((b) & 0x3FF) +#define FGENABLE 0x68 +#define FGEN BIT(0) +#define FGSLR 0x6C +#define FGENSTS 0x70 +#define ENSTS BIT(0) +#define FGTIMESTAMP 0x74 +#define LINEINDEX_MASK 0x3FFF +#define LINEINDEX_SHIFT 0 +#define FRAMEINDEX_MASK 0xFFFFC000 +#define FRAMEINDEX_SHIFT 14 +#define FGCHSTAT 0x78 +#define SECSYNCSTAT BIT(24) +#define SFIFOEMPTY BIT(16) +#define FGCHSTATCLR 0x7C +#define CLRSECSTAT BIT(16) +#define FGSKEWMON 0x80 +#define FGSFIFOMIN 0x84 +#define FGSFIFOMAX 0x88 +#define FGSFIFOFILLCLR 0x8C +#define FGSREPD 0x90 +#define FGSRFTD 0x94 + +#define KHZ 1000 +#define PLL_MIN_FREQ_HZ 648000000 + +struct dpu_framegen { + void __iomem *base; + struct clk *clk_pll; + struct clk *clk_bypass; + struct clk *clk_disp_sel; + struct clk *clk_disp; + struct mutex mutex; + int id; + bool inuse; + bool use_bypass_clk; + bool encoder_type_has_lvds; + bool side_by_side; + struct dpu_soc *dpu; +}; + +static inline u32 dpu_fg_read(struct dpu_framegen *fg, unsigned int offset) +{ + return readl(fg->base + offset); +} + +static inline void dpu_fg_write(struct dpu_framegen *fg, u32 value, + unsigned int offset) +{ + writel(value, fg->base + offset); +} + +/* FIXME: enable pixel link in a proper manner */ +static void dpu_pixel_link_enable(int dpu_id, int stream_id) +{ + sc_err_t sciErr; + sc_ipc_t ipcHndl = 0; + u32 mu_id; + + sciErr = sc_ipc_getMuID(&mu_id); + if (sciErr != SC_ERR_NONE) { + pr_err("Cannot obtain MU ID\n"); + return; + } + + sciErr = sc_ipc_open(&ipcHndl, mu_id); + if (sciErr != SC_ERR_NONE) { + pr_err("sc_ipc_open failed! (sciError = %d)\n", sciErr); + return; + } + + if (dpu_id == 0) { + sciErr = sc_misc_set_control(ipcHndl, SC_R_DC_0, + stream_id ? 
SC_C_PXL_LINK_MST2_ENB : SC_C_PXL_LINK_MST1_ENB, 1); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_0:SC_C_PXL_LINK_MST%d_ENB sc_misc_set_control failed! (sciError = %d)\n", stream_id + 1, sciErr); + } else if (dpu_id == 1) { + sciErr = sc_misc_set_control(ipcHndl, SC_R_DC_1, + stream_id ? SC_C_PXL_LINK_MST2_ENB : SC_C_PXL_LINK_MST1_ENB, 1); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_1:SC_C_PXL_LINK_MST%d_ENB sc_misc_set_control failed! (sciError = %d)\n", stream_id + 1, sciErr); + } + + sc_ipc_close(mu_id); +} + +/* FIXME: disable pixel link in a proper manner */ +static void dpu_pixel_link_disable(int dpu_id, int stream_id) +{ + sc_err_t sciErr; + sc_ipc_t ipcHndl = 0; + u32 mu_id; + + sciErr = sc_ipc_getMuID(&mu_id); + if (sciErr != SC_ERR_NONE) { + pr_err("Cannot obtain MU ID\n"); + return; + } + + sciErr = sc_ipc_open(&ipcHndl, mu_id); + if (sciErr != SC_ERR_NONE) { + pr_err("sc_ipc_open failed! (sciError = %d)\n", sciErr); + return; + } + + if (dpu_id == 0) { + sciErr = sc_misc_set_control(ipcHndl, SC_R_DC_0, + stream_id ? SC_C_PXL_LINK_MST2_ENB : SC_C_PXL_LINK_MST1_ENB, 0); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_0:SC_C_PXL_LINK_MST%d_ENB sc_misc_set_control failed! (sciError = %d)\n", stream_id + 1, sciErr); + } else if (dpu_id == 1) { + sciErr = sc_misc_set_control(ipcHndl, SC_R_DC_1, + stream_id ? SC_C_PXL_LINK_MST2_ENB : SC_C_PXL_LINK_MST1_ENB, 0); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_1:SC_C_PXL_LINK_MST%d_ENB sc_misc_set_control failed! (sciError = %d)\n", stream_id + 1, sciErr); + } + + sc_ipc_close(mu_id); +} + +/* FIXME: set MST address for pixel link in a proper manner */ +static void dpu_pixel_link_set_mst_addr(int dpu_id, int stream_id, int mst_addr) +{ + sc_err_t sciErr; + sc_ipc_t ipcHndl = 0; + u32 mu_id; + + sciErr = sc_ipc_getMuID(&mu_id); + if (sciErr != SC_ERR_NONE) { + pr_err("Cannot obtain MU ID\n"); + return; + } + + sciErr = sc_ipc_open(&ipcHndl, mu_id); + if (sciErr != SC_ERR_NONE) { + pr_err("sc_ipc_open failed! 
(sciError = %d)\n", sciErr); + return; + } + + if (dpu_id == 0) { + sciErr = sc_misc_set_control(ipcHndl, SC_R_DC_0, stream_id ? + SC_C_PXL_LINK_MST2_ADDR : SC_C_PXL_LINK_MST1_ADDR, + mst_addr); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_0:SC_C_PXL_LINK_MST%d_ADDR sc_misc_set_control failed! (sciError = %d)\n", stream_id + 1, sciErr); + } else if (dpu_id == 1) { + sciErr = sc_misc_set_control(ipcHndl, SC_R_DC_1, stream_id ? + SC_C_PXL_LINK_MST2_ADDR : SC_C_PXL_LINK_MST1_ADDR, + mst_addr); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_1:SC_C_PXL_LINK_MST%d_ADDR sc_misc_set_control failed! (sciError = %d)\n", stream_id + 1, sciErr); + } + + sc_ipc_close(mu_id); +} + +/* FIXME: set dc sync mode for pixel link in a proper manner */ +static void dpu_pixel_link_set_dc_sync_mode(int dpu_id, bool enable) +{ + sc_err_t sciErr; + sc_ipc_t ipcHndl = 0; + u32 mu_id; + + sciErr = sc_ipc_getMuID(&mu_id); + if (sciErr != SC_ERR_NONE) { + pr_err("Cannot obtain MU ID\n"); + return; + } + + sciErr = sc_ipc_open(&ipcHndl, mu_id); + if (sciErr != SC_ERR_NONE) { + pr_err("sc_ipc_open failed! (sciError = %d)\n", sciErr); + return; + } + + if (dpu_id == 0) { + sciErr = sc_misc_set_control(ipcHndl, + SC_R_DC_0, SC_C_MODE, enable); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_0:SC_C_MODE sc_misc_set_control failed! (sciError = %d)\n", sciErr); + } else if (dpu_id == 1) { + sciErr = sc_misc_set_control(ipcHndl, + SC_R_DC_1, SC_C_MODE, enable); + if (sciErr != SC_ERR_NONE) + pr_err("SC_R_DC_1:SC_C_MODE sc_misc_set_control failed! 
(sciError = %d)\n", sciErr); + } + + sc_ipc_close(mu_id); +} + +void framegen_enable(struct dpu_framegen *fg) +{ + struct dpu_soc *dpu = fg->dpu; + const struct dpu_devtype *devtype = dpu->devtype; + + mutex_lock(&fg->mutex); + dpu_fg_write(fg, FGEN, FGENABLE); + mutex_unlock(&fg->mutex); + + if (!(devtype->has_dual_ldb && fg->encoder_type_has_lvds)) + dpu_pixel_link_enable(dpu->id, fg->id); +} +EXPORT_SYMBOL_GPL(framegen_enable); + +void framegen_disable(struct dpu_framegen *fg) +{ + struct dpu_soc *dpu = fg->dpu; + const struct dpu_devtype *devtype = dpu->devtype; + + if (!(devtype->has_dual_ldb && fg->encoder_type_has_lvds)) + dpu_pixel_link_disable(dpu->id, fg->id); + + mutex_lock(&fg->mutex); + dpu_fg_write(fg, 0, FGENABLE); + mutex_unlock(&fg->mutex); +} +EXPORT_SYMBOL_GPL(framegen_disable); + +void framegen_shdtokgen(struct dpu_framegen *fg) +{ + mutex_lock(&fg->mutex); + dpu_fg_write(fg, SHDTOKGEN, FGSLR); + mutex_unlock(&fg->mutex); +} +EXPORT_SYMBOL_GPL(framegen_shdtokgen); + +void framegen_syncmode(struct dpu_framegen *fg, fgsyncmode_t mode) +{ + struct dpu_soc *dpu = fg->dpu; + u32 val; + + mutex_lock(&fg->mutex); + val = dpu_fg_read(fg, FGSTCTRL); + val &= ~FGSYNCMODE_MASK; + val |= mode; + dpu_fg_write(fg, val, FGSTCTRL); + mutex_unlock(&fg->mutex); + + dpu_pixel_link_set_dc_sync_mode(dpu->id, mode != FGSYNCMODE__OFF); +} +EXPORT_SYMBOL_GPL(framegen_syncmode); + +void +framegen_cfg_videomode(struct dpu_framegen *fg, + struct drm_display_mode *m, bool side_by_side, + bool encoder_type_has_tmds, bool encoder_type_has_lvds) +{ + struct dpu_soc *dpu = fg->dpu; + const struct dpu_devtype *devtype = dpu->devtype; + u32 hact, htotal, hsync, hsbp; + u32 vact, vtotal, vsync, vsbp; + u32 kick_row, kick_col; + u32 val; + unsigned long disp_clock_rate, pll_clock_rate = 0; + int div = 0; + + fg->side_by_side = side_by_side; + fg->encoder_type_has_lvds = encoder_type_has_lvds; + + hact = m->crtc_hdisplay; + htotal = m->crtc_htotal; + hsync = m->crtc_hsync_end - 
m->crtc_hsync_start; + hsbp = m->crtc_htotal - m->crtc_hsync_start; + + if (side_by_side) { + hact /= 2; + htotal /= 2; + hsync /= 2; + hsbp /= 2; + } + + vact = m->crtc_vdisplay; + vtotal = m->crtc_vtotal; + vsync = m->crtc_vsync_end - m->crtc_vsync_start; + vsbp = m->crtc_vtotal - m->crtc_vsync_start; + + mutex_lock(&fg->mutex); + /* video mode */ + dpu_fg_write(fg, HACT(hact) | HTOTAL(htotal), HTCFG1); + dpu_fg_write(fg, HSYNC(hsync) | HSBP(hsbp) | HSEN, HTCFG2); + dpu_fg_write(fg, VACT(vact) | VTOTAL(vtotal), VTCFG1); + dpu_fg_write(fg, VSYNC(vsync) | VSBP(vsbp) | VSEN, VTCFG2); + + kick_col = hact + 1; + kick_row = vact; + /* + * FrameGen as slave needs to be kicked later for + * one line comparing to the master. + */ + if (side_by_side && framegen_is_slave(fg) && + devtype->has_syncmode_fixup) + kick_row++; + + /* pkickconfig */ + dpu_fg_write(fg, COL(kick_col) | ROW(kick_row) | EN, PKICKCONFIG); + + /* skikconfig */ + dpu_fg_write(fg, COL(kick_col) | ROW(kick_row) | EN, SKICKCONFIG); + + /* primary position config */ + dpu_fg_write(fg, STARTX(0) | STARTY(0), PACFG); + + /* alpha */ + val = dpu_fg_read(fg, FGINCTRL); + val &= ~(ENPRIMALPHA | ENSECALPHA); + dpu_fg_write(fg, val, FGINCTRL); + + val = dpu_fg_read(fg, FGINCTRLPANIC); + val &= ~(ENPRIMALPHA | ENSECALPHA); + dpu_fg_write(fg, val, FGINCTRLPANIC); + + /* constant color */ + dpu_fg_write(fg, 0, FGCCR); + mutex_unlock(&fg->mutex); + + disp_clock_rate = m->clock * 1000; + + /* + * To workaround setting clock rate failure issue + * when the system resumes back from PM sleep mode, + * we need to get the clock rates before setting + * their rates, otherwise, setting the clock rates + * will fail. + */ + if (devtype->has_disp_sel_clk && encoder_type_has_tmds) { + if (side_by_side) + dpu_pixel_link_set_mst_addr(dpu->id, fg->id, + fg->id ? 
2 : 1); + else + dpu_pixel_link_set_mst_addr(dpu->id, fg->id, 1); + + clk_set_parent(fg->clk_disp_sel, fg->clk_bypass); + + fg->use_bypass_clk = true; + } else { + dpu_pixel_link_set_mst_addr(dpu->id, fg->id, 0); + + /* find an even divisor for PLL */ + do { + div += 2; + pll_clock_rate = disp_clock_rate * div; + } while (pll_clock_rate < PLL_MIN_FREQ_HZ); + + if (devtype->has_disp_sel_clk) + clk_set_parent(fg->clk_disp_sel, fg->clk_pll); + + clk_get_rate(fg->clk_pll); + clk_get_rate(fg->clk_disp); + clk_set_rate(fg->clk_pll, pll_clock_rate); + clk_set_rate(fg->clk_disp, disp_clock_rate); + + fg->use_bypass_clk = false; + } +} +EXPORT_SYMBOL_GPL(framegen_cfg_videomode); + +void framegen_pkickconfig(struct dpu_framegen *fg, bool enable) +{ + u32 val; + + mutex_lock(&fg->mutex); + val = dpu_fg_read(fg, PKICKCONFIG); + if (enable) + val |= EN; + else + val &= ~EN; + dpu_fg_write(fg, val, PKICKCONFIG); + mutex_unlock(&fg->mutex); +} +EXPORT_SYMBOL_GPL(framegen_pkickconfig); + +void framegen_syncmode_fixup(struct dpu_framegen *fg, bool enable) +{ + struct dpu_soc *dpu = fg->dpu; + u32 val; + + if (!dpu->devtype->has_syncmode_fixup) + return; + + mutex_lock(&fg->mutex); + val = dpu_fg_read(fg, SECSTATCONFIG); + if (enable) + val |= BIT(7); + else + val &= ~BIT(7); + dpu_fg_write(fg, val, SECSTATCONFIG); + mutex_unlock(&fg->mutex); +} +EXPORT_SYMBOL_GPL(framegen_syncmode_fixup); + +void framegen_sacfg(struct dpu_framegen *fg, unsigned int x, unsigned int y) +{ + mutex_lock(&fg->mutex); + dpu_fg_write(fg, STARTX(x) | STARTY(y), SACFG); + mutex_unlock(&fg->mutex); +} +EXPORT_SYMBOL_GPL(framegen_sacfg); + +void framegen_displaymode(struct dpu_framegen *fg, fgdm_t mode) +{ + u32 val; + + mutex_lock(&fg->mutex); + val = dpu_fg_read(fg, FGINCTRL); + val &= ~FGDM_MASK; + val |= mode; + dpu_fg_write(fg, val, FGINCTRL); + mutex_unlock(&fg->mutex); +} +EXPORT_SYMBOL_GPL(framegen_displaymode); + +void framegen_panic_displaymode(struct dpu_framegen *fg, fgdm_t mode) +{ + u32 val; + + 
mutex_lock(&fg->mutex); + val = dpu_fg_read(fg, FGINCTRLPANIC); + val &= ~FGDM_MASK; + val |= mode; + dpu_fg_write(fg, val, FGINCTRLPANIC); + mutex_unlock(&fg->mutex); +} +EXPORT_SYMBOL_GPL(framegen_panic_displaymode); + +void framegen_wait_done(struct dpu_framegen *fg, struct drm_display_mode *m) +{ + unsigned long timeout, pending_framedur_jiffies; + int frame_size = m->crtc_htotal * m->crtc_vtotal; + int dotclock, pending_framedur_ns; + u32 val; + + dotclock = clk_get_rate(fg->clk_disp) / KHZ; + if (dotclock == 0) { + /* fall back to display mode's clock */ + dotclock = m->crtc_clock; + + if (!(fg->side_by_side && fg->id == 1)) + dev_warn(fg->dpu->dev, + "pixel clock for FrameGen%d is zero\n", fg->id); + } + + /* + * The SoC designer indicates that there are two pending frames + * to complete in the worst case. + * So, three pending frames are enough for sure. + */ + pending_framedur_ns = div_u64((u64) 3 * frame_size * 1000000, dotclock); + pending_framedur_jiffies = nsecs_to_jiffies(pending_framedur_ns); + if (pending_framedur_jiffies > (3 * HZ)) { + pending_framedur_jiffies = 3 * HZ; + + dev_warn(fg->dpu->dev, + "truncate FrameGen%d pending frame duration to 3sec\n", + fg->id); + } + timeout = jiffies + pending_framedur_jiffies; + + mutex_lock(&fg->mutex); + do { + val = dpu_fg_read(fg, FGENSTS); + } while ((val & ENSTS) && time_before(jiffies, timeout)); + mutex_unlock(&fg->mutex); + + dev_dbg(fg->dpu->dev, "FrameGen%d pending frame duration is %ums\n", + fg->id, jiffies_to_msecs(pending_framedur_jiffies)); + + if (val & ENSTS) + dev_err(fg->dpu->dev, "failed to wait for FrameGen%d done\n", + fg->id); +} +EXPORT_SYMBOL_GPL(framegen_wait_done); + +static inline u32 framegen_frame_index(u32 stamp) +{ + return (stamp & FRAMEINDEX_MASK) >> FRAMEINDEX_SHIFT; +} + +static inline u32 framegen_line_index(u32 stamp) +{ + return (stamp & LINEINDEX_MASK) >> LINEINDEX_SHIFT; +} + +void framegen_read_timestamp(struct dpu_framegen *fg, + u32 *frame_index, u32 *line_index) 
+{ + u32 stamp; + + mutex_lock(&fg->mutex); + stamp = dpu_fg_read(fg, FGTIMESTAMP); + *frame_index = framegen_frame_index(stamp); + *line_index = framegen_line_index(stamp); + mutex_unlock(&fg->mutex); +} +EXPORT_SYMBOL_GPL(framegen_read_timestamp); + +void framegen_wait_for_frame_counter_moving(struct dpu_framegen *fg) +{ + u32 frame_index, line_index, last_frame_index; + unsigned long timeout = jiffies + msecs_to_jiffies(50); + + framegen_read_timestamp(fg, &frame_index, &line_index); + do { + last_frame_index = frame_index; + framegen_read_timestamp(fg, &frame_index, &line_index); + } while (last_frame_index == frame_index && + time_before(jiffies, timeout)); + + if (last_frame_index == frame_index) + dev_err(fg->dpu->dev, + "failed to wait for FrameGen%d frame counter moving\n", + fg->id); + else + dev_dbg(fg->dpu->dev, + "FrameGen%d frame counter moves - last %u, curr %d\n", + fg->id, last_frame_index, frame_index); +} +EXPORT_SYMBOL_GPL(framegen_wait_for_frame_counter_moving); + +bool framegen_secondary_requests_to_read_empty_fifo(struct dpu_framegen *fg) +{ + u32 val; + bool empty; + + mutex_lock(&fg->mutex); + val = dpu_fg_read(fg, FGCHSTAT); + mutex_unlock(&fg->mutex); + + empty = !!(val & SFIFOEMPTY); + + if (empty) + dev_dbg(fg->dpu->dev, + "FrameGen%d secondary requests to read empty FIFO\n", + fg->id); + + return empty; +} +EXPORT_SYMBOL_GPL(framegen_secondary_requests_to_read_empty_fifo); + +void framegen_secondary_clear_channel_status(struct dpu_framegen *fg) +{ + mutex_lock(&fg->mutex); + dpu_fg_write(fg, CLRSECSTAT, FGCHSTATCLR); + mutex_unlock(&fg->mutex); +} +EXPORT_SYMBOL_GPL(framegen_secondary_clear_channel_status); + +bool framegen_secondary_is_syncup(struct dpu_framegen *fg) +{ + u32 val; + + mutex_lock(&fg->mutex); + val = dpu_fg_read(fg, FGCHSTAT); + mutex_unlock(&fg->mutex); + + return val & SECSYNCSTAT; +} +EXPORT_SYMBOL_GPL(framegen_secondary_is_syncup); + +void framegen_wait_for_secondary_syncup(struct dpu_framegen *fg) +{ + unsigned 
long timeout = jiffies + msecs_to_jiffies(100); + bool syncup; + + do { + syncup = framegen_secondary_is_syncup(fg); + } while (!syncup && time_before(jiffies, timeout)); + + if (syncup) + dev_dbg(fg->dpu->dev, "FrameGen%d secondary syncup\n", fg->id); + else + dev_err(fg->dpu->dev, + "failed to wait for FrameGen%d secondary syncup\n", + fg->id); +} +EXPORT_SYMBOL_GPL(framegen_wait_for_secondary_syncup); + +void framegen_enable_clock(struct dpu_framegen *fg) +{ + if (!fg->use_bypass_clk) + clk_prepare_enable(fg->clk_pll); + clk_prepare_enable(fg->clk_disp); +} +EXPORT_SYMBOL_GPL(framegen_enable_clock); + +void framegen_disable_clock(struct dpu_framegen *fg) +{ + if (!fg->use_bypass_clk) + clk_disable_unprepare(fg->clk_pll); + clk_disable_unprepare(fg->clk_disp); +} +EXPORT_SYMBOL_GPL(framegen_disable_clock); + +bool framegen_is_master(struct dpu_framegen *fg) +{ + const struct dpu_devtype *devtype = fg->dpu->devtype; + + return fg->id == devtype->master_stream_id; +} +EXPORT_SYMBOL_GPL(framegen_is_master); + +bool framegen_is_slave(struct dpu_framegen *fg) +{ + return !framegen_is_master(fg); +} +EXPORT_SYMBOL_GPL(framegen_is_slave); + +struct dpu_framegen *dpu_fg_get(struct dpu_soc *dpu, int id) +{ + struct dpu_framegen *fg; + int i; + + for (i = 0; i < ARRAY_SIZE(fg_ids); i++) + if (fg_ids[i] == id) + break; + + if (i == ARRAY_SIZE(fg_ids)) + return ERR_PTR(-EINVAL); + + fg = dpu->fg_priv[i]; + + mutex_lock(&fg->mutex); + + if (fg->inuse) { + mutex_unlock(&fg->mutex); + return ERR_PTR(-EBUSY); + } + + fg->inuse = true; + + mutex_unlock(&fg->mutex); + + return fg; +} +EXPORT_SYMBOL_GPL(dpu_fg_get); + +void dpu_fg_put(struct dpu_framegen *fg) +{ + mutex_lock(&fg->mutex); + + fg->inuse = false; + + mutex_unlock(&fg->mutex); +} +EXPORT_SYMBOL_GPL(dpu_fg_put); + +struct dpu_framegen *dpu_aux_fg_peek(struct dpu_framegen *fg) +{ + return fg->dpu->fg_priv[fg->id ^ 1]; +} +EXPORT_SYMBOL_GPL(dpu_aux_fg_peek); + +void _dpu_fg_init(struct dpu_soc *dpu, unsigned int id) +{ + 
struct dpu_framegen *fg; + int i; + + for (i = 0; i < ARRAY_SIZE(fg_ids); i++) + if (fg_ids[i] == id) + break; + + if (WARN_ON(i == ARRAY_SIZE(fg_ids))) + return; + + fg = dpu->fg_priv[i]; + + framegen_syncmode(fg, FGSYNCMODE__OFF); +} + +int dpu_fg_init(struct dpu_soc *dpu, unsigned int id, + unsigned long unused, unsigned long base) +{ + struct dpu_framegen *fg; + + fg = devm_kzalloc(dpu->dev, sizeof(*fg), GFP_KERNEL); + if (!fg) + return -ENOMEM; + + dpu->fg_priv[id] = fg; + + fg->base = devm_ioremap(dpu->dev, base, SZ_256); + if (!fg->base) + return -ENOMEM; + + fg->clk_pll = devm_clk_get(dpu->dev, id ? "pll1" : "pll0"); + if (IS_ERR(fg->clk_pll)) + return PTR_ERR(fg->clk_pll); + + if (dpu->devtype->has_disp_sel_clk) { + fg->clk_bypass = devm_clk_get(dpu->dev, "bypass0"); + if (IS_ERR(fg->clk_bypass)) + return PTR_ERR(fg->clk_bypass); + + fg->clk_disp_sel = devm_clk_get(dpu->dev, + id ? "disp1_sel" : "disp0_sel"); + if (IS_ERR(fg->clk_disp_sel)) + return PTR_ERR(fg->clk_disp_sel); + } + + fg->clk_disp = devm_clk_get(dpu->dev, id ? "disp1" : "disp0"); + if (IS_ERR(fg->clk_disp)) + return PTR_ERR(fg->clk_disp); + + fg->dpu = dpu; + fg->id = id; + mutex_init(&fg->mutex); + + _dpu_fg_init(dpu, id); + + return 0; +} diff --git a/drivers/gpu/imx/dpu/dpu-hscaler.c b/drivers/gpu/imx/dpu/dpu-hscaler.c new file mode 100644 index 0000000000000000000000000000000000000000..5f6d9eb55436246fc8f4a696fffeea02ade66d96 --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-hscaler.c @@ -0,0 +1,395 @@ +/* + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * for more details. + */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <video/dpu.h> +#include "dpu-prv.h" + +#define PIXENGCFG_DYNAMIC 0x8 +#define PIXENGCFG_DYNAMIC_SRC_SEL_MASK 0x3F + +#define SETUP1 0xC +#define SCALE_FACTOR_MASK 0xFFFFF +#define SCALE_FACTOR(n) ((n) & 0xFFFFF) +#define SETUP2 0x10 +#define PHASE_OFFSET_MASK 0x1FFFFF +#define PHASE_OFFSET(n) ((n) & 0x1FFFFF) +#define CONTROL 0x14 +#define OUTPUT_SIZE_MASK 0x3FFF0000 +#define OUTPUT_SIZE(n) ((((n) - 1) << 16) & OUTPUT_SIZE_MASK) +#define FILTER_MODE 0x100 +#define SCALE_MODE 0x10 +#define MODE 0x1 + +static const hs_src_sel_t src_sels[3][6] = { + { + HS_SRC_SEL__DISABLE, + HS_SRC_SEL__EXTSRC4, + HS_SRC_SEL__FETCHDECODE0, + HS_SRC_SEL__FETCHDECODE2, + HS_SRC_SEL__MATRIX4, + HS_SRC_SEL__VSCALER4, + }, { + HS_SRC_SEL__DISABLE, + HS_SRC_SEL__EXTSRC5, + HS_SRC_SEL__FETCHDECODE1, + HS_SRC_SEL__FETCHDECODE3, + HS_SRC_SEL__MATRIX5, + HS_SRC_SEL__VSCALER5, + }, { + HS_SRC_SEL__DISABLE, + HS_SRC_SEL__MATRIX9, + HS_SRC_SEL__VSCALER9, + HS_SRC_SEL__FILTER9, + }, +}; + +struct dpu_hscaler { + void __iomem *pec_base; + void __iomem *base; + struct mutex mutex; + int id; + bool inuse; + struct dpu_soc *dpu; + /* see DPU_PLANE_SRC_xxx */ + unsigned int stream_id; +}; + +static inline u32 dpu_pec_hs_read(struct dpu_hscaler *hs, + unsigned int offset) +{ + return readl(hs->pec_base + offset); +} + +static inline void dpu_pec_hs_write(struct dpu_hscaler *hs, u32 value, + unsigned int offset) +{ + writel(value, hs->pec_base + offset); +} + +static inline u32 dpu_hs_read(struct dpu_hscaler *hs, unsigned int offset) +{ + return readl(hs->base + offset); +} + +static inline void dpu_hs_write(struct dpu_hscaler *hs, u32 value, + unsigned int offset) +{ + writel(value, hs->base + offset); +} + +int hscaler_pixengcfg_dynamic_src_sel(struct dpu_hscaler *hs, hs_src_sel_t src) +{ + 
struct dpu_soc *dpu = hs->dpu; + const unsigned int *block_id_map = dpu->devtype->sw2hw_block_id_map; + const unsigned int hs_id_array[] = {4, 5, 9}; + int i, j; + u32 val, mapped_src; + + for (i = 0; i < ARRAY_SIZE(hs_id_array); i++) + if (hs_id_array[i] == hs->id) + break; + + if (WARN_ON(i == ARRAY_SIZE(hs_id_array))) + return -EINVAL; + + mutex_lock(&hs->mutex); + for (j = 0; j < ARRAY_SIZE(src_sels[0]); j++) { + if (src_sels[i][j] == src) { + mapped_src = block_id_map ? block_id_map[src] : src; + if (WARN_ON(mapped_src == NA)) + return -EINVAL; + + val = dpu_pec_hs_read(hs, PIXENGCFG_DYNAMIC); + val &= ~PIXENGCFG_DYNAMIC_SRC_SEL_MASK; + val |= mapped_src; + dpu_pec_hs_write(hs, val, PIXENGCFG_DYNAMIC); + mutex_unlock(&hs->mutex); + return 0; + } + } + mutex_unlock(&hs->mutex); + + dev_err(dpu->dev, "Invalid source for HScaler%d\n", hs->id); + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(hscaler_pixengcfg_dynamic_src_sel); + +void hscaler_pixengcfg_clken(struct dpu_hscaler *hs, pixengcfg_clken_t clken) +{ + u32 val; + + mutex_lock(&hs->mutex); + val = dpu_pec_hs_read(hs, PIXENGCFG_DYNAMIC); + val &= ~CLKEN_MASK; + val |= clken << CLKEN_MASK_SHIFT; + dpu_pec_hs_write(hs, val, PIXENGCFG_DYNAMIC); + mutex_unlock(&hs->mutex); +} +EXPORT_SYMBOL_GPL(hscaler_pixengcfg_clken); + +void hscaler_shden(struct dpu_hscaler *hs, bool enable) +{ + u32 val; + + mutex_lock(&hs->mutex); + val = dpu_hs_read(hs, STATICCONTROL); + if (enable) + val |= SHDEN; + else + val &= ~SHDEN; + dpu_hs_write(hs, val, STATICCONTROL); + mutex_unlock(&hs->mutex); +} +EXPORT_SYMBOL_GPL(hscaler_shden); + +void hscaler_setup1(struct dpu_hscaler *hs, u32 src, u32 dst) +{ + struct dpu_soc *dpu = hs->dpu; + u32 scale_factor; + u64 tmp64; + + if (src == dst) { + scale_factor = 0x80000; + } else { + if (src > dst) { + tmp64 = (u64)((u64)dst * 0x80000); + do_div(tmp64, src); + + } else { + tmp64 = (u64)((u64)src * 0x80000); + do_div(tmp64, dst); + } + scale_factor = (u32)tmp64; + } + + WARN_ON(scale_factor > 
0x80000); + + mutex_lock(&hs->mutex); + dpu_hs_write(hs, SCALE_FACTOR(scale_factor), SETUP1); + mutex_unlock(&hs->mutex); + + dev_dbg(dpu->dev, "Hscaler%d scale factor 0x%08x\n", + hs->id, scale_factor); +} +EXPORT_SYMBOL_GPL(hscaler_setup1); + +void hscaler_setup2(struct dpu_hscaler *hs, u32 phase_offset) +{ + mutex_lock(&hs->mutex); + dpu_hs_write(hs, PHASE_OFFSET(phase_offset), SETUP2); + mutex_unlock(&hs->mutex); +} +EXPORT_SYMBOL_GPL(hscaler_setup2); + +void hscaler_output_size(struct dpu_hscaler *hs, u32 line_num) +{ + u32 val; + + mutex_lock(&hs->mutex); + val = dpu_hs_read(hs, CONTROL); + val &= ~OUTPUT_SIZE_MASK; + val |= OUTPUT_SIZE(line_num); + dpu_hs_write(hs, val, CONTROL); + mutex_unlock(&hs->mutex); +} +EXPORT_SYMBOL_GPL(hscaler_output_size); + +void hscaler_filter_mode(struct dpu_hscaler *hs, scaler_filter_mode_t m) +{ + u32 val; + + mutex_lock(&hs->mutex); + val = dpu_hs_read(hs, CONTROL); + val &= ~FILTER_MODE; + val |= m; + dpu_hs_write(hs, val, CONTROL); + mutex_unlock(&hs->mutex); +} +EXPORT_SYMBOL_GPL(hscaler_filter_mode); + +void hscaler_scale_mode(struct dpu_hscaler *hs, scaler_scale_mode_t m) +{ + u32 val; + + mutex_lock(&hs->mutex); + val = dpu_hs_read(hs, CONTROL); + val &= ~SCALE_MODE; + val |= m; + dpu_hs_write(hs, val, CONTROL); + mutex_unlock(&hs->mutex); +} +EXPORT_SYMBOL_GPL(hscaler_scale_mode); + +void hscaler_mode(struct dpu_hscaler *hs, scaler_mode_t m) +{ + u32 val; + + mutex_lock(&hs->mutex); + val = dpu_hs_read(hs, CONTROL); + val &= ~MODE; + val |= m; + dpu_hs_write(hs, val, CONTROL); + mutex_unlock(&hs->mutex); +} +EXPORT_SYMBOL_GPL(hscaler_mode); + +bool hscaler_is_enabled(struct dpu_hscaler *hs) +{ + u32 val; + + mutex_lock(&hs->mutex); + val = dpu_hs_read(hs, CONTROL); + mutex_unlock(&hs->mutex); + + return (val & MODE) == SCALER_ACTIVE; +} +EXPORT_SYMBOL_GPL(hscaler_is_enabled); + +dpu_block_id_t hscaler_get_block_id(struct dpu_hscaler *hs) +{ + switch (hs->id) { + case 4: + return ID_HSCALER4; + case 5: + return 
ID_HSCALER5; + case 9: + return ID_HSCALER9; + default: + WARN_ON(1); + } + + return ID_NONE; +} +EXPORT_SYMBOL_GPL(hscaler_get_block_id); + +unsigned int hscaler_get_stream_id(struct dpu_hscaler *hs) +{ + return hs->stream_id; +} +EXPORT_SYMBOL_GPL(hscaler_get_stream_id); + +void hscaler_set_stream_id(struct dpu_hscaler *hs, unsigned int id) +{ + switch (id) { + case DPU_PLANE_SRC_TO_DISP_STREAM0: + case DPU_PLANE_SRC_TO_DISP_STREAM1: + case DPU_PLANE_SRC_DISABLED: + hs->stream_id = id; + break; + default: + WARN_ON(1); + } +} +EXPORT_SYMBOL_GPL(hscaler_set_stream_id); + +struct dpu_hscaler *dpu_hs_get(struct dpu_soc *dpu, int id) +{ + struct dpu_hscaler *hs; + int i; + + for (i = 0; i < ARRAY_SIZE(hs_ids); i++) + if (hs_ids[i] == id) + break; + + if (i == ARRAY_SIZE(hs_ids)) + return ERR_PTR(-EINVAL); + + hs = dpu->hs_priv[i]; + + mutex_lock(&hs->mutex); + + if (hs->inuse) { + mutex_unlock(&hs->mutex); + return ERR_PTR(-EBUSY); + } + + hs->inuse = true; + + mutex_unlock(&hs->mutex); + + return hs; +} +EXPORT_SYMBOL_GPL(dpu_hs_get); + +void dpu_hs_put(struct dpu_hscaler *hs) +{ + mutex_lock(&hs->mutex); + + hs->inuse = false; + + mutex_unlock(&hs->mutex); +} +EXPORT_SYMBOL_GPL(dpu_hs_put); + +void _dpu_hs_init(struct dpu_soc *dpu, unsigned int id) +{ + struct dpu_hscaler *hs; + int i; + + for (i = 0; i < ARRAY_SIZE(hs_ids); i++) + if (hs_ids[i] == id) + break; + + if (WARN_ON(i == ARRAY_SIZE(hs_ids))) + return; + + hs = dpu->hs_priv[i]; + + hscaler_shden(hs, true); + hscaler_setup2(hs, 0); + hscaler_pixengcfg_dynamic_src_sel(hs, HS_SRC_SEL__DISABLE); +} + +int dpu_hs_init(struct dpu_soc *dpu, unsigned int id, + unsigned long pec_base, unsigned long base) +{ + struct dpu_hscaler *hs; + int i; + + hs = devm_kzalloc(dpu->dev, sizeof(*hs), GFP_KERNEL); + if (!hs) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(hs_ids); i++) + if (hs_ids[i] == id) + break; + + if (i == ARRAY_SIZE(hs_ids)) + return -EINVAL; + + dpu->hs_priv[i] = hs; + + hs->pec_base = 
devm_ioremap(dpu->dev, pec_base, SZ_8); + if (!hs->pec_base) + return -ENOMEM; + + hs->base = devm_ioremap(dpu->dev, base, SZ_1K); + if (!hs->base) + return -ENOMEM; + + hs->dpu = dpu; + hs->id = id; + + mutex_init(&hs->mutex); + + _dpu_hs_init(dpu, id); + + return 0; +} diff --git a/drivers/gpu/imx/dpu/dpu-layerblend.c b/drivers/gpu/imx/dpu/dpu-layerblend.c new file mode 100644 index 0000000000000000000000000000000000000000..e0245998a96bd17ff5abbfe86f33fb984f03c2c1 --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-layerblend.c @@ -0,0 +1,343 @@ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include <linux/io.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <video/dpu.h> +#include "dpu-prv.h" + +#define PIXENGCFG_DYNAMIC 0x8 +#define PIXENGCFG_DYNAMIC_PRIM_SEL_MASK 0x3F +#define PIXENGCFG_DYNAMIC_SEC_SEL_MASK 0x3F00 +#define PIXENGCFG_DYNAMIC_SEC_SEL_SHIFT 8 + +static const lb_prim_sel_t prim_sels[] = { + LB_PRIM_SEL__DISABLE, + LB_PRIM_SEL__BLITBLEND9, + LB_PRIM_SEL__CONSTFRAME0, + LB_PRIM_SEL__CONSTFRAME1, + LB_PRIM_SEL__CONSTFRAME4, + LB_PRIM_SEL__CONSTFRAME5, + LB_PRIM_SEL__MATRIX4, + LB_PRIM_SEL__HSCALER4, + LB_PRIM_SEL__VSCALER4, + LB_PRIM_SEL__EXTSRC4, + LB_PRIM_SEL__MATRIX5, + LB_PRIM_SEL__HSCALER5, + LB_PRIM_SEL__VSCALER5, + LB_PRIM_SEL__EXTSRC5, + LB_PRIM_SEL__LAYERBLEND0, + LB_PRIM_SEL__LAYERBLEND1, + LB_PRIM_SEL__LAYERBLEND2, + LB_PRIM_SEL__LAYERBLEND3, + LB_PRIM_SEL__LAYERBLEND4, + LB_PRIM_SEL__LAYERBLEND5, +}; + +#define PIXENGCFG_STATUS 0xC +#define SHDTOKSEL (0x3 << 3) +#define SHDTOKSEL_SHIFT 3 +#define SHDLDSEL (0x3 << 1) +#define SHDLDSEL_SHIFT 1 +#define CONTROL 0xC +#define MODE_MASK BIT(0) +#define BLENDCONTROL 0x10 +#define ALPHA(a) (((a) & 0xFF) << 16) +#define PRIM_C_BLD_FUNC__ONE_MINUS_SEC_ALPHA 0x5 +#define PRIM_C_BLD_FUNC__PRIM_ALPHA 0x2 +#define SEC_C_BLD_FUNC__CONST_ALPHA (0x6 << 4) +#define SEC_C_BLD_FUNC__ONE_MINUS_PRIM_ALPHA (0x3 << 4) +#define PRIM_A_BLD_FUNC__ONE_MINUS_SEC_ALPHA (0x5 << 8) +#define PRIM_A_BLD_FUNC__ZERO (0x0 << 8) +#define SEC_A_BLD_FUNC__ONE (0x1 << 12) +#define SEC_A_BLD_FUNC__ZERO (0x0 << 12) +#define POSITION 0x14 +#define XPOS(x) ((x) & 0x7FFF) +#define YPOS(y) (((y) & 0x7FFF) << 16) +#define PRIMCONTROLWORD 0x18 +#define SECCONTROLWORD 0x1C + +struct dpu_layerblend { + void __iomem *pec_base; + void __iomem *base; + struct mutex mutex; + int id; + bool inuse; + struct dpu_soc *dpu; +}; + +static inline u32 dpu_pec_lb_read(struct dpu_layerblend *lb, + unsigned int offset) +{ + return readl(lb->pec_base + offset); +} + +static inline 
void dpu_pec_lb_write(struct dpu_layerblend *lb, u32 value, + unsigned int offset) +{ + writel(value, lb->pec_base + offset); +} + +static inline u32 dpu_lb_read(struct dpu_layerblend *lb, unsigned int offset) +{ + return readl(lb->base + offset); +} + +static inline void dpu_lb_write(struct dpu_layerblend *lb, u32 value, + unsigned int offset) +{ + writel(value, lb->base + offset); +} + +int layerblend_pixengcfg_dynamic_prim_sel(struct dpu_layerblend *lb, + lb_prim_sel_t prim) +{ + struct dpu_soc *dpu = lb->dpu; + const unsigned int *block_id_map = dpu->devtype->sw2hw_block_id_map; + int fixed_sels_num = ARRAY_SIZE(prim_sels) - 6; + int i; + u32 val, mapped_prim; + + mutex_lock(&lb->mutex); + for (i = 0; i < fixed_sels_num + lb->id; i++) { + if (prim_sels[i] == prim) { + mapped_prim = block_id_map ? block_id_map[prim] : prim; + if (WARN_ON(mapped_prim == NA)) + return -EINVAL; + + val = dpu_pec_lb_read(lb, PIXENGCFG_DYNAMIC); + val &= ~PIXENGCFG_DYNAMIC_PRIM_SEL_MASK; + val |= mapped_prim; + dpu_pec_lb_write(lb, val, PIXENGCFG_DYNAMIC); + mutex_unlock(&lb->mutex); + return 0; + } + } + mutex_unlock(&lb->mutex); + + dev_err(dpu->dev, "Invalid primary source for LayerBlend%d\n", lb->id); + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(layerblend_pixengcfg_dynamic_prim_sel); + +void layerblend_pixengcfg_dynamic_sec_sel(struct dpu_layerblend *lb, + lb_sec_sel_t sec) +{ + struct dpu_soc *dpu = lb->dpu; + const unsigned int *block_id_map = dpu->devtype->sw2hw_block_id_map; + u32 val, mapped_sec; + + mapped_sec = block_id_map ? 
block_id_map[sec] : sec; + if (WARN_ON(mapped_sec == NA)) + return; + + mutex_lock(&lb->mutex); + val = dpu_pec_lb_read(lb, PIXENGCFG_DYNAMIC); + val &= ~PIXENGCFG_DYNAMIC_SEC_SEL_MASK; + val |= mapped_sec << PIXENGCFG_DYNAMIC_SEC_SEL_SHIFT; + dpu_pec_lb_write(lb, val, PIXENGCFG_DYNAMIC); + mutex_unlock(&lb->mutex); +} +EXPORT_SYMBOL_GPL(layerblend_pixengcfg_dynamic_sec_sel); + +void layerblend_pixengcfg_clken(struct dpu_layerblend *lb, + pixengcfg_clken_t clken) +{ + u32 val; + + mutex_lock(&lb->mutex); + val = dpu_pec_lb_read(lb, PIXENGCFG_DYNAMIC); + val &= ~CLKEN_MASK; + val |= clken << CLKEN_MASK_SHIFT; + dpu_pec_lb_write(lb, val, PIXENGCFG_DYNAMIC); + mutex_unlock(&lb->mutex); +} +EXPORT_SYMBOL_GPL(layerblend_pixengcfg_clken); + +void layerblend_shden(struct dpu_layerblend *lb, bool enable) +{ + u32 val; + + mutex_lock(&lb->mutex); + val = dpu_lb_read(lb, STATICCONTROL); + if (enable) + val |= SHDEN; + else + val &= ~SHDEN; + dpu_lb_write(lb, val, STATICCONTROL); + mutex_unlock(&lb->mutex); +} +EXPORT_SYMBOL_GPL(layerblend_shden); + +void layerblend_shdtoksel(struct dpu_layerblend *lb, lb_shadow_sel_t sel) +{ + u32 val; + + mutex_lock(&lb->mutex); + val = dpu_lb_read(lb, STATICCONTROL); + val &= ~SHDTOKSEL; + val |= (sel << SHDTOKSEL_SHIFT); + dpu_lb_write(lb, val, STATICCONTROL); + mutex_unlock(&lb->mutex); +} +EXPORT_SYMBOL_GPL(layerblend_shdtoksel); + +void layerblend_shdldsel(struct dpu_layerblend *lb, lb_shadow_sel_t sel) +{ + u32 val; + + mutex_lock(&lb->mutex); + val = dpu_lb_read(lb, STATICCONTROL); + val &= ~SHDLDSEL; + val |= (sel << SHDLDSEL_SHIFT); + dpu_lb_write(lb, val, STATICCONTROL); + mutex_unlock(&lb->mutex); +} +EXPORT_SYMBOL_GPL(layerblend_shdldsel); + +void layerblend_control(struct dpu_layerblend *lb, lb_mode_t mode) +{ + u32 val; + + mutex_lock(&lb->mutex); + val = dpu_lb_read(lb, CONTROL); + val &= ~MODE_MASK; + val |= mode; + dpu_lb_write(lb, val, CONTROL); + mutex_unlock(&lb->mutex); +} +EXPORT_SYMBOL_GPL(layerblend_control); + +void 
layerblend_blendcontrol(struct dpu_layerblend *lb, bool sec_from_scaler) +{ + u32 val; + + val = ALPHA(0xff) | + PRIM_C_BLD_FUNC__PRIM_ALPHA | + SEC_C_BLD_FUNC__ONE_MINUS_PRIM_ALPHA | + PRIM_A_BLD_FUNC__ZERO; + + val |= sec_from_scaler ? SEC_A_BLD_FUNC__ZERO : SEC_A_BLD_FUNC__ONE; + + mutex_lock(&lb->mutex); + dpu_lb_write(lb, val, BLENDCONTROL); + mutex_unlock(&lb->mutex); +} +EXPORT_SYMBOL_GPL(layerblend_blendcontrol); + +void layerblend_position(struct dpu_layerblend *lb, int x, int y) +{ + mutex_lock(&lb->mutex); + dpu_lb_write(lb, XPOS(x) | YPOS(y), POSITION); + mutex_unlock(&lb->mutex); +} +EXPORT_SYMBOL_GPL(layerblend_position); + +struct dpu_layerblend *dpu_lb_get(struct dpu_soc *dpu, int id) +{ + struct dpu_layerblend *lb; + int i; + + for (i = 0; i < ARRAY_SIZE(lb_ids); i++) + if (lb_ids[i] == id) + break; + + if (i == ARRAY_SIZE(lb_ids)) + return ERR_PTR(-EINVAL); + + lb = dpu->lb_priv[i]; + + mutex_lock(&lb->mutex); + + if (lb->inuse) { + mutex_unlock(&lb->mutex); + return ERR_PTR(-EBUSY); + } + + lb->inuse = true; + + mutex_unlock(&lb->mutex); + + return lb; +} +EXPORT_SYMBOL_GPL(dpu_lb_get); + +void dpu_lb_put(struct dpu_layerblend *lb) +{ + mutex_lock(&lb->mutex); + + lb->inuse = false; + + mutex_unlock(&lb->mutex); +} +EXPORT_SYMBOL_GPL(dpu_lb_put); + +void _dpu_lb_init(struct dpu_soc *dpu, unsigned int id) +{ + struct dpu_layerblend *lb; + int i; + + for (i = 0; i < ARRAY_SIZE(lb_ids); i++) + if (lb_ids[i] == id) + break; + + if (WARN_ON(i == ARRAY_SIZE(lb_ids))) + return; + + lb = dpu->lb_priv[i]; + + layerblend_pixengcfg_dynamic_prim_sel(lb, LB_PRIM_SEL__DISABLE); + layerblend_pixengcfg_dynamic_sec_sel(lb, LB_SEC_SEL__DISABLE); + layerblend_pixengcfg_clken(lb, CLKEN__AUTOMATIC); + layerblend_shdldsel(lb, BOTH); + layerblend_shdtoksel(lb, BOTH); + layerblend_shden(lb, true); +} + +int dpu_lb_init(struct dpu_soc *dpu, unsigned int id, + unsigned long pec_base, unsigned long base) +{ + struct dpu_layerblend *lb; + int ret; + + lb = 
devm_kzalloc(dpu->dev, sizeof(*lb), GFP_KERNEL); + if (!lb) + return -ENOMEM; + + dpu->lb_priv[id] = lb; + + lb->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_16); + if (!lb->pec_base) + return -ENOMEM; + + lb->base = devm_ioremap(dpu->dev, base, SZ_32); + if (!lb->base) + return -ENOMEM; + + lb->dpu = dpu; + lb->id = id; + mutex_init(&lb->mutex); + + ret = layerblend_pixengcfg_dynamic_prim_sel(lb, LB_PRIM_SEL__DISABLE); + if (ret < 0) + return ret; + + _dpu_lb_init(dpu, id); + + return 0; +} diff --git a/drivers/gpu/imx/dpu/dpu-prv.h b/drivers/gpu/imx/dpu/dpu-prv.h new file mode 100644 index 0000000000000000000000000000000000000000..d19b4483715e45a76261ecf7ce2425a282c53c0f --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-prv.h @@ -0,0 +1,441 @@ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ +#ifndef __DPU_PRV_H__ +#define __DPU_PRV_H__ + +#include <drm/drm_fourcc.h> +#include <video/dpu.h> + +#define NA 0xDEADBEEF /* not available */ + +#define STATICCONTROL 0x8 +#define SHDLDREQSTICKY(lm) (((lm) & 0xFF) << 24) +#define SHDLDREQSTICKY_MASK (0xFF << 24) +#define BASEADDRESSAUTOUPDATE(lm) (((lm) & 0xFF) << 16) +#define BASEADDRESSAUTOUPDATE_MASK (0xFF << 16) +#define SHDEN BIT(0) +#define BURSTBUFFERMANAGEMENT 0xC +#define SETNUMBUFFERS(n) ((n) & 0xFF) +#define SETBURSTLENGTH(n) (((n) & 0x1F) << 8) +#define SETBURSTLENGTH_MASK 0x1F00 +#define LINEMODE_MASK 0x80000000U +#define LINEMODE_SHIFT 31U +enum linemode { + /* + * Mandatory setting for operation in the Display Controller. + * Works also for Blit Engine with marginal performance impact. + */ + LINEMODE__DISPLAY = 0, + /* Recommended setting for operation in the Blit Engine. */ + LINEMODE__BLIT = 1 << LINEMODE_SHIFT, +}; + +#define BITSPERPIXEL(bpp) (((bpp) & 0x3F) << 16) +#define STRIDE(n) (((n) - 1) & 0xFFFF) +#define LINEWIDTH(w) (((w) - 1) & 0x3FFF) +#define LINECOUNT(h) ((((h) - 1) & 0x3FFF) << 16) +#define ITUFORMAT BIT(31) +#define R_BITS(n) (((n) & 0xF) << 24) +#define G_BITS(n) (((n) & 0xF) << 16) +#define B_BITS(n) (((n) & 0xF) << 8) +#define A_BITS(n) ((n) & 0xF) +#define R_SHIFT(n) (((n) & 0x1F) << 24) +#define G_SHIFT(n) (((n) & 0x1F) << 16) +#define B_SHIFT(n) (((n) & 0x1F) << 8) +#define A_SHIFT(n) ((n) & 0x1F) +#define Y_BITS(n) R_BITS(n) +#define Y_BITS_MASK 0xF000000 +#define U_BITS(n) G_BITS(n) +#define U_BITS_MASK 0xF0000 +#define V_BITS(n) B_BITS(n) +#define V_BITS_MASK 0xF00 +#define Y_SHIFT(n) R_SHIFT(n) +#define Y_SHIFT_MASK 0x1F000000 +#define U_SHIFT(n) G_SHIFT(n) +#define U_SHIFT_MASK 0x1F0000 +#define V_SHIFT(n) B_SHIFT(n) +#define V_SHIFT_MASK 0x1F00 +#define LAYERXOFFSET(x) ((x) & 0x7FFF) +#define LAYERYOFFSET(y) (((y) & 0x7FFF) << 16) +#define CLIPWINDOWXOFFSET(x) ((x) & 0x7FFF) +#define CLIPWINDOWYOFFSET(y) (((y) & 0x7FFF) << 16) +#define CLIPWINDOWWIDTH(w) 
(((w) - 1) & 0x3FFF) +#define CLIPWINDOWHEIGHT(h) ((((h) - 1) & 0x3FFF) << 16) +#define PALETTEENABLE BIT(0) +typedef enum { + TILE_FILL_ZERO, + TILE_FILL_CONSTANT, + TILE_PAD, + TILE_PAD_ZERO, +} tilemode_t; +#define ALPHASRCENABLE BIT(8) +#define ALPHACONSTENABLE BIT(9) +#define ALPHAMASKENABLE BIT(10) +#define ALPHATRANSENABLE BIT(11) +#define RGBALPHASRCENABLE BIT(12) +#define RGBALPHACONSTENABLE BIT(13) +#define RGBALPHAMASKENABLE BIT(14) +#define RGBALPHATRANSENABLE BIT(15) +#define PREMULCONSTRGB BIT(16) +typedef enum { + YUVCONVERSIONMODE__OFF, + YUVCONVERSIONMODE__ITU601, + YUVCONVERSIONMODE__ITU601_FR, + YUVCONVERSIONMODE__ITU709, +} yuvconversionmode_t; +#define YUVCONVERSIONMODE_MASK 0x60000 +#define YUVCONVERSIONMODE(m) (((m) & 0x3) << 17) +#define GAMMAREMOVEENABLE BIT(20) +#define CLIPWINDOWENABLE BIT(30) +#define SOURCEBUFFERENABLE BIT(31) +#define EMPTYFRAME BIT(31) +#define FRAMEWIDTH(w) (((w) - 1) & 0x3FFF) +#define FRAMEHEIGHT(h) ((((h) - 1) & 0x3FFF) << 16) +#define DELTAX_MASK 0x3F000 +#define DELTAY_MASK 0xFC0000 +#define DELTAX(x) (((x) & 0x3F) << 12) +#define DELTAY(y) (((y) & 0x3F) << 18) +#define YUV422UPSAMPLINGMODE_MASK BIT(5) +#define YUV422UPSAMPLINGMODE(m) (((m) & 0x1) << 5) +typedef enum { + YUV422UPSAMPLINGMODE__REPLICATE, + YUV422UPSAMPLINGMODE__INTERPOLATE, +} yuv422upsamplingmode_t; +#define INPUTSELECT_MASK 0x18 +#define INPUTSELECT(s) (((s) & 0x3) << 3) +typedef enum { + INPUTSELECT__INACTIVE, + INPUTSELECT__COMPPACK, + INPUTSELECT__ALPHAMASK, + INPUTSELECT__COORDINATE, +} inputselect_t; +#define RASTERMODE_MASK 0x7 +#define RASTERMODE(m) ((m) & 0x7) +typedef enum { + RASTERMODE__NORMAL, + RASTERMODE__DECODE, + RASTERMODE__ARBITRARY, + RASTERMODE__PERSPECTIVE, + RASTERMODE__YUV422, + RASTERMODE__AFFINE, +} rastermode_t; +#define SHDTOKGEN BIT(0) +#define FETCHTYPE_MASK 0xF + +#define DPU_FRAC_PLANE_LAYER_NUM 8 + +enum { + DPU_V1, + DPU_V2, +}; + +#define DPU_VPROC_CAP_HSCALER4 BIT(0) +#define DPU_VPROC_CAP_VSCALER4 BIT(1) 
+#define DPU_VPROC_CAP_HSCALER5 BIT(2) +#define DPU_VPROC_CAP_VSCALER5 BIT(3) +#define DPU_VPROC_CAP_FETCHECO0 BIT(4) +#define DPU_VPROC_CAP_FETCHECO1 BIT(5) + +#define DPU_VPROC_CAP_HSCALE (DPU_VPROC_CAP_HSCALER4 | \ + DPU_VPROC_CAP_HSCALER5) +#define DPU_VPROC_CAP_VSCALE (DPU_VPROC_CAP_VSCALER4 | \ + DPU_VPROC_CAP_VSCALER5) +#define DPU_VPROC_CAP_FETCHECO (DPU_VPROC_CAP_FETCHECO0 | \ + DPU_VPROC_CAP_FETCHECO1) + +struct dpu_unit { + char *name; + unsigned int num; + const unsigned int *ids; + const unsigned long *pec_ofss; /* PixEngCFG */ + const unsigned long *ofss; + const unsigned int *dprc_ids; +}; + +struct cm_reg_ofs { + u32 ipidentifier; + u32 lockunlock; + u32 lockstatus; + u32 userinterruptmask; + u32 interruptenable; + u32 interruptpreset; + u32 interruptclear; + u32 interruptstatus; + u32 userinterruptenable; + u32 userinterruptpreset; + u32 userinterruptclear; + u32 userinterruptstatus; + u32 generalpurpose; +}; + +struct dpu_devtype { + unsigned long cm_ofs; /* common */ + const struct dpu_unit *cfs; + const struct dpu_unit *decs; + const struct dpu_unit *eds; + const struct dpu_unit *fds; + const struct dpu_unit *fes; + const struct dpu_unit *fgs; + const struct dpu_unit *fls; + const struct dpu_unit *fws; + const struct dpu_unit *hss; + const struct dpu_unit *lbs; + const struct dpu_unit *sts; + const struct dpu_unit *tcons; + const struct dpu_unit *vss; + const struct cm_reg_ofs *cm_reg_ofs; + const unsigned int *intsteer_map; + unsigned int intsteer_map_size; + const unsigned long *unused_irq; + const unsigned int *sw2hw_irq_map; /* NULL means linear */ + const unsigned int *sw2hw_block_id_map; /* NULL means linear */ + + unsigned int syncmode_min_prate; /* need pixel combiner, KHz */ + unsigned int singlemode_max_width; + unsigned int master_stream_id; + + /* + * index: 0 1 2 3 4 5 6 + * source: fl0(sub0) fl1(sub0) fw2(sub0) fd0 fd1 fd2 fd3 + */ + u32 plane_src_na_mask; + bool has_capture; + bool has_prefetch; + bool has_disp_sel_clk; + bool 
has_dual_ldb; + bool has_pc; + bool has_syncmode_fixup; + bool pixel_link_quirks; + bool pixel_link_nhvsync; /* HSYNC and VSYNC high active */ + unsigned int version; +}; + +struct dpu_soc { + struct device *dev; + const struct dpu_devtype *devtype; + spinlock_t lock; + + void __iomem *cm_reg; + + int id; + int usecount; + + struct regmap *intsteer_regmap; + int intsteer_usecount; + spinlock_t intsteer_lock; + int irq_cm; /* irq common */ + int irq_stream0a; + int irq_stream1a; + int irq_reserved0; + int irq_reserved1; + int irq_blit; + int irq_dpr0; + int irq_dpr1; + struct irq_domain *domain; + + struct dpu_constframe *cf_priv[4]; + struct dpu_disengcfg *dec_priv[2]; + struct dpu_extdst *ed_priv[4]; + struct dpu_fetchunit *fd_priv[4]; + struct dpu_fetchunit *fe_priv[4]; + struct dpu_framegen *fg_priv[2]; + struct dpu_fetchunit *fl_priv[2]; + struct dpu_fetchunit *fw_priv[1]; + struct dpu_hscaler *hs_priv[3]; + struct dpu_layerblend *lb_priv[7]; + struct dpu_store *st_priv[1]; + struct dpu_tcon *tcon_priv[2]; + struct dpu_vscaler *vs_priv[3]; +}; + +int dpu_format_horz_chroma_subsampling(u32 format); +int dpu_format_vert_chroma_subsampling(u32 format); +int dpu_format_num_planes(u32 format); +int dpu_format_plane_width(int width, u32 format, int plane); +int dpu_format_plane_height(int height, u32 format, int plane); + +#define _DECLARE_DPU_UNIT_INIT_FUNC(block) \ +void _dpu_##block##_init(struct dpu_soc *dpu, unsigned int id) \ + +_DECLARE_DPU_UNIT_INIT_FUNC(cf); +_DECLARE_DPU_UNIT_INIT_FUNC(dec); +_DECLARE_DPU_UNIT_INIT_FUNC(ed); +_DECLARE_DPU_UNIT_INIT_FUNC(fd); +_DECLARE_DPU_UNIT_INIT_FUNC(fe); +_DECLARE_DPU_UNIT_INIT_FUNC(fg); +_DECLARE_DPU_UNIT_INIT_FUNC(fl); +_DECLARE_DPU_UNIT_INIT_FUNC(fw); +_DECLARE_DPU_UNIT_INIT_FUNC(hs); +_DECLARE_DPU_UNIT_INIT_FUNC(lb); +_DECLARE_DPU_UNIT_INIT_FUNC(tcon); +_DECLARE_DPU_UNIT_INIT_FUNC(vs); + +#define DECLARE_DPU_UNIT_INIT_FUNC(block) \ +int dpu_##block##_init(struct dpu_soc *dpu, unsigned int id, \ + unsigned long 
pec_base, unsigned long base) + +DECLARE_DPU_UNIT_INIT_FUNC(cf); +DECLARE_DPU_UNIT_INIT_FUNC(dec); +DECLARE_DPU_UNIT_INIT_FUNC(ed); +DECLARE_DPU_UNIT_INIT_FUNC(fd); +DECLARE_DPU_UNIT_INIT_FUNC(fe); +DECLARE_DPU_UNIT_INIT_FUNC(fg); +DECLARE_DPU_UNIT_INIT_FUNC(fl); +DECLARE_DPU_UNIT_INIT_FUNC(fw); +DECLARE_DPU_UNIT_INIT_FUNC(hs); +DECLARE_DPU_UNIT_INIT_FUNC(lb); +DECLARE_DPU_UNIT_INIT_FUNC(st); +DECLARE_DPU_UNIT_INIT_FUNC(tcon); +DECLARE_DPU_UNIT_INIT_FUNC(vs); + +static inline u32 dpu_pec_fu_read(struct dpu_fetchunit *fu, unsigned int offset) +{ + return readl(fu->pec_base + offset); +} + +static inline void dpu_pec_fu_write(struct dpu_fetchunit *fu, u32 value, + unsigned int offset) +{ + writel(value, fu->pec_base + offset); +} + +static inline u32 dpu_fu_read(struct dpu_fetchunit *fu, unsigned int offset) +{ + return readl(fu->base + offset); +} + +static inline void dpu_fu_write(struct dpu_fetchunit *fu, u32 value, + unsigned int offset) +{ + writel(value, fu->base + offset); +} + +static inline u32 rgb_color(u8 r, u8 g, u8 b, u8 a) +{ + return (r << 24) | (g << 16) | (b << 8) | a; +} + +static inline u32 yuv_color(u8 y, u8 u, u8 v) +{ + return (y << 24) | (u << 16) | (v << 8); +} + +void tcon_get_pc(struct dpu_tcon *tcon, void *data); + +static const unsigned int cf_ids[] = {0, 1, 4, 5}; +static const unsigned int dec_ids[] = {0, 1}; +static const unsigned int ed_ids[] = {0, 1, 4, 5}; +static const unsigned int fd_ids[] = {0, 1, 2, 3}; +static const unsigned int fe_ids[] = {0, 1, 2, 9}; +static const unsigned int fg_ids[] = {0, 1}; +static const unsigned int fl_ids[] = {0, 1}; +static const unsigned int fw_ids[] = {2}; +static const unsigned int hs_ids[] = {4, 5, 9}; +static const unsigned int lb_ids[] = {0, 1, 2, 3, 4, 5, 6}; +static const unsigned int st_ids[] = {9}; +static const unsigned int tcon_ids[] = {0, 1}; +static const unsigned int vs_ids[] = {4, 5, 9}; + +static const unsigned int fd_dprc_ids[] = {3, 4}; +static const unsigned int fl_dprc_ids[] = 
{2}; +static const unsigned int fw_dprc_ids[] = {5}; + +struct dpu_pixel_format { + u32 pixel_format; + u32 bits; + u32 shift; +}; + +static const struct dpu_pixel_format dpu_pixel_format_matrix[] = { + { + DRM_FORMAT_ARGB8888, + R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(8), + R_SHIFT(16) | G_SHIFT(8) | B_SHIFT(0) | A_SHIFT(24), + }, { + DRM_FORMAT_XRGB8888, + R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0), + R_SHIFT(16) | G_SHIFT(8) | B_SHIFT(0) | A_SHIFT(0), + }, { + DRM_FORMAT_ABGR8888, + R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(8), + R_SHIFT(0) | G_SHIFT(8) | B_SHIFT(16) | A_SHIFT(24), + }, { + DRM_FORMAT_XBGR8888, + R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0), + R_SHIFT(0) | G_SHIFT(8) | B_SHIFT(16) | A_SHIFT(0), + }, { + DRM_FORMAT_RGBA8888, + R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(8), + R_SHIFT(24) | G_SHIFT(16) | B_SHIFT(8) | A_SHIFT(0), + }, { + DRM_FORMAT_RGBX8888, + R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0), + R_SHIFT(24) | G_SHIFT(16) | B_SHIFT(8) | A_SHIFT(0), + }, { + DRM_FORMAT_BGRA8888, + R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(8), + R_SHIFT(8) | G_SHIFT(16) | B_SHIFT(24) | A_SHIFT(0), + }, { + DRM_FORMAT_BGRX8888, + R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0), + R_SHIFT(8) | G_SHIFT(16) | B_SHIFT(24) | A_SHIFT(0), + }, { + DRM_FORMAT_RGB888, + R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0), + R_SHIFT(16) | G_SHIFT(8) | B_SHIFT(0) | A_SHIFT(0), + }, { + DRM_FORMAT_BGR888, + R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0), + R_SHIFT(0) | G_SHIFT(8) | B_SHIFT(16) | A_SHIFT(0), + }, { + DRM_FORMAT_RGB565, + R_BITS(5) | G_BITS(6) | B_BITS(5) | A_BITS(0), + R_SHIFT(11) | G_SHIFT(5) | B_SHIFT(0) | A_SHIFT(0), + }, { + DRM_FORMAT_YUYV, + Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0), + Y_SHIFT(0) | U_SHIFT(8) | V_SHIFT(8) | A_SHIFT(0), + }, { + DRM_FORMAT_UYVY, + Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0), + Y_SHIFT(8) | U_SHIFT(0) | V_SHIFT(0) | A_SHIFT(0), + }, { + DRM_FORMAT_NV12, + Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0), + 
Y_SHIFT(0) | U_SHIFT(0) | V_SHIFT(8) | A_SHIFT(0), + }, { + DRM_FORMAT_NV21, + Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0), + Y_SHIFT(0) | U_SHIFT(8) | V_SHIFT(0) | A_SHIFT(0), + }, { + DRM_FORMAT_NV16, + Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0), + Y_SHIFT(0) | U_SHIFT(0) | V_SHIFT(8) | A_SHIFT(0), + }, { + DRM_FORMAT_NV61, + Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0), + Y_SHIFT(0) | U_SHIFT(8) | V_SHIFT(0) | A_SHIFT(0), + }, { + DRM_FORMAT_NV24, + Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0), + Y_SHIFT(0) | U_SHIFT(0) | V_SHIFT(8) | A_SHIFT(0), + }, { + DRM_FORMAT_NV42, + Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0), + Y_SHIFT(0) | U_SHIFT(8) | V_SHIFT(0) | A_SHIFT(0), + }, +}; + +#endif /* __DPU_PRV_H__ */ diff --git a/drivers/gpu/imx/dpu/dpu-store.c b/drivers/gpu/imx/dpu/dpu-store.c new file mode 100644 index 0000000000000000000000000000000000000000..a70bfc2e5394f6c660e280fccf8012fc57fb2296 --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-store.c @@ -0,0 +1,140 @@ +/* + * Copyright 2018-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include "dpu-prv.h" + +#define PIXENGCFG_STATIC 0x8 + +struct dpu_store { + void __iomem *pec_base; + void __iomem *base; + struct mutex mutex; + int id; + bool inuse; + struct dpu_soc *dpu; +}; + +static inline u32 dpu_pec_st_read(struct dpu_store *st, unsigned int offset) +{ + return readl(st->pec_base + offset); +} + +static inline void dpu_pec_st_write(struct dpu_store *st, u32 value, + unsigned int offset) +{ + writel(value, st->pec_base + offset); +} + +void store_pixengcfg_syncmode_fixup(struct dpu_store *st, bool enable) +{ + struct dpu_soc *dpu; + u32 val; + + if (!st) + return; + + dpu = st->dpu; + + if (!dpu->devtype->has_syncmode_fixup) + return; + + mutex_lock(&st->mutex); + val = dpu_pec_st_read(st, PIXENGCFG_STATIC); + if (enable) + val |= BIT(16); + else + val &= ~BIT(16); + dpu_pec_st_write(st, val, PIXENGCFG_STATIC); + mutex_unlock(&st->mutex); +} +EXPORT_SYMBOL_GPL(store_pixengcfg_syncmode_fixup); + +struct dpu_store *dpu_st_get(struct dpu_soc *dpu, int id) +{ + struct dpu_store *st; + int i; + + for (i = 0; i < ARRAY_SIZE(st_ids); i++) + if (st_ids[i] == id) + break; + + if (i == ARRAY_SIZE(st_ids)) + return ERR_PTR(-EINVAL); + + st = dpu->st_priv[i]; + + mutex_lock(&st->mutex); + + if (st->inuse) { + mutex_unlock(&st->mutex); + return ERR_PTR(-EBUSY); + } + + st->inuse = true; + + mutex_unlock(&st->mutex); + + return st; +} +EXPORT_SYMBOL_GPL(dpu_st_get); + +void dpu_st_put(struct dpu_store *st) +{ + mutex_lock(&st->mutex); + + st->inuse = false; + + mutex_unlock(&st->mutex); +} +EXPORT_SYMBOL_GPL(dpu_st_put); + +int dpu_st_init(struct dpu_soc *dpu, unsigned int id, + unsigned long pec_base, unsigned long base) +{ + struct dpu_store *st; + int i; + + st = devm_kzalloc(dpu->dev, sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(st_ids); i++) + if (st_ids[i] == id) + 
break; + + if (i == ARRAY_SIZE(st_ids)) + return -EINVAL; + + dpu->st_priv[i] = st; + + st->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_32); + if (!st->pec_base) + return -ENOMEM; + + st->base = devm_ioremap(dpu->dev, base, SZ_256); + if (!st->base) + return -ENOMEM; + + st->dpu = dpu; + st->id = id; + + mutex_init(&st->mutex); + + return 0; +} diff --git a/drivers/gpu/imx/dpu/dpu-tcon.c b/drivers/gpu/imx/dpu/dpu-tcon.c new file mode 100644 index 0000000000000000000000000000000000000000..546a77d75800983403fbe61cc5b95f0b56828042 --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-tcon.c @@ -0,0 +1,338 @@ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include <linux/io.h> +#include <linux/media-bus-format.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <video/dpu.h> +#include <video/imx8-pc.h> +#include "dpu-prv.h" + +#define SSQCNTS 0 +#define SSQCYCLE 0x8 +#define SWRESET 0xC +#define TCON_CTRL 0x10 +#define BYPASS BIT(3) +#define RSDSINVCTRL 0x14 +#define MAPBIT3_0 0x18 +#define MAPBIT7_4 0x1C +#define MAPBIT11_8 0x20 +#define MAPBIT15_12 0x24 +#define MAPBIT19_16 0x28 +#define MAPBIT23_20 0x2C +#define MAPBIT27_24 0x30 +#define MAPBIT31_28 0x34 +#define MAPBIT34_32 0x38 +#define MAPBIT3_0_DUAL 0x3C +#define MAPBIT7_4_DUAL 0x40 +#define MAPBIT11_8_DUAL 0x44 +#define MAPBIT15_12_DUAL 0x48 +#define MAPBIT19_16_DUAL 0x4C +#define MAPBIT23_20_DUAL 0x50 +#define MAPBIT27_24_DUAL 0x54 +#define MAPBIT31_28_DUAL 0x58 +#define MAPBIT34_32_DUAL 0x5C +#define SPGPOSON(n) (0x60 + (n) * 16) +#define X(n) (((n) & 0x7FFF) << 16) +#define Y(n) ((n) & 0x7FFF) +#define SPGMASKON(n) (0x64 + (n) * 16) +#define SPGPOSOFF(n) (0x68 + (n) * 16) +#define SPGMASKOFF(n) (0x6C + (n) * 16) +#define SMXSIGS(n) (0x120 + (n) * 8) +#define SMXFCTTABLE(n) (0x124 + (n) * 8) +#define RESET_OVER_UNFERFLOW 0x180 +#define DUAL_DEBUG 0x184 + +struct dpu_tcon { + void __iomem *base; + struct mutex mutex; + int id; + bool inuse; + struct dpu_soc *dpu; + struct pc *pc; +}; + +static inline u32 dpu_tcon_read(struct dpu_tcon *tcon, unsigned int offset) +{ + return readl(tcon->base + offset); +} + +static inline void dpu_tcon_write(struct dpu_tcon *tcon, u32 value, + unsigned int offset) +{ + writel(value, tcon->base + offset); +} + +int tcon_set_fmt(struct dpu_tcon *tcon, u32 bus_format) +{ + mutex_lock(&tcon->mutex); + switch (bus_format) { + case MEDIA_BUS_FMT_RGB888_1X24: + dpu_tcon_write(tcon, 0x19181716, MAPBIT3_0); + dpu_tcon_write(tcon, 0x1d1c1b1a, MAPBIT7_4); + dpu_tcon_write(tcon, 0x0f0e0d0c, MAPBIT11_8); + dpu_tcon_write(tcon, 0x13121110, MAPBIT15_12); + 
		dpu_tcon_write(tcon, 0x05040302, MAPBIT19_16);
		dpu_tcon_write(tcon, 0x09080706, MAPBIT23_20);
		break;
	case MEDIA_BUS_FMT_RGB101010_1X30:
	case MEDIA_BUS_FMT_RGB888_1X30_PADLO:
	case MEDIA_BUS_FMT_RGB666_1X30_PADLO:
		/* 30-bit bus mappings: program all eight MAPBIT registers */
		dpu_tcon_write(tcon, 0x17161514, MAPBIT3_0);
		dpu_tcon_write(tcon, 0x1b1a1918, MAPBIT7_4);
		dpu_tcon_write(tcon, 0x0b0a1d1c, MAPBIT11_8);
		dpu_tcon_write(tcon, 0x0f0e0d0c, MAPBIT15_12);
		dpu_tcon_write(tcon, 0x13121110, MAPBIT19_16);
		dpu_tcon_write(tcon, 0x03020100, MAPBIT23_20);
		dpu_tcon_write(tcon, 0x07060504, MAPBIT27_24);
		dpu_tcon_write(tcon, 0x00000908, MAPBIT31_28);
		break;
	default:
		/* unsupported media bus format */
		mutex_unlock(&tcon->mutex);
		return -EINVAL;
	}
	mutex_unlock(&tcon->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(tcon_set_fmt);

/*
 * Clear the BYPASS bit so the TCON leaves bypass and enters operation mode.
 * This function is used to workaround TKT320590 which is related to DPR/PRG:
 * tcon_cfg_videomode() sets BYPASS first, and this is called later, after the
 * first dumb frame has been generated by the DPU.
 */
void tcon_set_operation_mode(struct dpu_tcon *tcon)
{
	u32 val;

	mutex_lock(&tcon->mutex);
	val = dpu_tcon_read(tcon, TCON_CTRL);
	val &= ~BYPASS;
	dpu_tcon_write(tcon, val, TCON_CTRL);
	mutex_unlock(&tcon->mutex);
}
EXPORT_SYMBOL_GPL(tcon_set_operation_mode);

/*
 * Program the TCON signal generators (SPG) and signal multiplexers (SMX)
 * from the DRM display mode.
 *
 * @m:            mode whose h/v timings are programmed into the SPGs
 * @side_by_side: true when two TCONs each drive half of the display; the
 *                horizontal timings are then halved for this TCON
 */
void tcon_cfg_videomode(struct dpu_tcon *tcon,
			struct drm_display_mode *m, bool side_by_side)
{
	struct dpu_soc *dpu = tcon->dpu;
	const struct dpu_devtype *devtype = dpu->devtype;
	u32 val;
	int hdisplay, hsync_start, hsync_end;
	int vdisplay, vsync_start, vsync_end;
	int y;

	hdisplay = m->hdisplay;
	vdisplay = m->vdisplay;
	hsync_start = m->hsync_start;
	vsync_start = m->vsync_start;
	hsync_end = m->hsync_end;
	vsync_end = m->vsync_end;

	if (side_by_side) {
		/* each TCON drives one horizontal half of the mode */
		hdisplay /= 2;
		hsync_start /= 2;
		hsync_end /= 2;
	}

	mutex_lock(&tcon->mutex);
	/*
	 * TKT320590:
	 * Turn TCON into operation mode later after the first dumb frame is
	 * generated by DPU.  This makes DPR/PRG be able to evade the frame.
	 * (tcon_set_operation_mode() clears BYPASS again later.)
	 */
	val = dpu_tcon_read(tcon, TCON_CTRL);
	val |= BYPASS;
	dpu_tcon_write(tcon, val, TCON_CTRL);

	/* dsp_control[0]: hsync */
	dpu_tcon_write(tcon, X(hsync_start), SPGPOSON(0));
	dpu_tcon_write(tcon, 0xffff, SPGMASKON(0));

	dpu_tcon_write(tcon, X(hsync_end), SPGPOSOFF(0));
	dpu_tcon_write(tcon, 0xffff, SPGMASKOFF(0));

	dpu_tcon_write(tcon, 0x2, SMXSIGS(0));
	dpu_tcon_write(tcon, 0x1, SMXFCTTABLE(0));

	/* dsp_control[1]: vsync */
	dpu_tcon_write(tcon, X(hsync_start) | Y(vsync_start - 1), SPGPOSON(1));
	dpu_tcon_write(tcon, 0x0, SPGMASKON(1));

	dpu_tcon_write(tcon, X(hsync_start) | Y(vsync_end - 1), SPGPOSOFF(1));
	dpu_tcon_write(tcon, 0x0, SPGMASKOFF(1));

	dpu_tcon_write(tcon, 0x3, SMXSIGS(1));
	dpu_tcon_write(tcon, 0x1, SMXFCTTABLE(1));

	/* dsp_control[2]: data enable */
	/* horizontal */
	dpu_tcon_write(tcon, 0x0, SPGPOSON(2));
	dpu_tcon_write(tcon, 0xffff, SPGMASKON(2));

	dpu_tcon_write(tcon, X(hdisplay), SPGPOSOFF(2));
	dpu_tcon_write(tcon, 0xffff, SPGMASKOFF(2));

	/* vertical */
	dpu_tcon_write(tcon, 0x0, SPGPOSON(3));
	dpu_tcon_write(tcon, 0x7fff0000, SPGMASKON(3));

	dpu_tcon_write(tcon, Y(vdisplay), SPGPOSOFF(3));
	dpu_tcon_write(tcon, 0x7fff0000, SPGMASKOFF(3));

	dpu_tcon_write(tcon, 0x2c, SMXSIGS(2));
	dpu_tcon_write(tcon, 0x8, SMXFCTTABLE(2));

	/* dsp_control[3]: kachuck */
	y = vdisplay + 1;
	/*
	 * If sync mode fixup is present, the kachuck signal from slave tcon
	 * should be one line later than the one from master tcon.
	 */
	if (side_by_side && tcon_is_slave(tcon) && devtype->has_syncmode_fixup)
		y++;

	dpu_tcon_write(tcon, X(0x0) | Y(y), SPGPOSON(4));
	dpu_tcon_write(tcon, 0x0, SPGMASKON(4));

	dpu_tcon_write(tcon, X(0x20) | Y(y), SPGPOSOFF(4));
	dpu_tcon_write(tcon, 0x0, SPGMASKOFF(4));

	dpu_tcon_write(tcon, 0x6, SMXSIGS(3));
	dpu_tcon_write(tcon, 0x2, SMXFCTTABLE(3));
	mutex_unlock(&tcon->mutex);
}
EXPORT_SYMBOL_GPL(tcon_cfg_videomode);

/* A TCON is the master when its id matches the SoC's master stream id. */
bool tcon_is_master(struct dpu_tcon *tcon)
{
	const struct dpu_devtype *devtype = tcon->dpu->devtype;

	return tcon->id == devtype->master_stream_id;
}
EXPORT_SYMBOL_GPL(tcon_is_master);

bool tcon_is_slave(struct dpu_tcon *tcon)
{
	return !tcon_is_master(tcon);
}
EXPORT_SYMBOL_GPL(tcon_is_slave);

/* Forward configuration to the attached pixel combiner (set via tcon_get_pc()). */
void tcon_configure_pc(struct dpu_tcon *tcon, unsigned int di,
		       unsigned int frame_width, u32 mode, u32 format)
{
	if (WARN_ON(!tcon || !tcon->pc))
		return;

	pc_configure(tcon->pc, di, frame_width, mode, format);
}
EXPORT_SYMBOL_GPL(tcon_configure_pc);

void tcon_enable_pc(struct dpu_tcon *tcon)
{
	if (WARN_ON(!tcon || !tcon->pc))
		return;

	pc_enable(tcon->pc);
}
EXPORT_SYMBOL_GPL(tcon_enable_pc);

void tcon_disable_pc(struct dpu_tcon *tcon)
{
	if (WARN_ON(!tcon || !tcon->pc))
		return;

	pc_disable(tcon->pc);
}
EXPORT_SYMBOL_GPL(tcon_disable_pc);

/*
 * Acquire exclusive use of the TCON with the given hardware id.
 * Returns ERR_PTR(-EINVAL) for an unknown id, ERR_PTR(-EBUSY) if the
 * unit is already claimed.  Release with dpu_tcon_put().
 */
struct dpu_tcon *dpu_tcon_get(struct dpu_soc *dpu, int id)
{
	struct dpu_tcon *tcon;
	int i;

	for (i = 0; i < ARRAY_SIZE(tcon_ids); i++)
		if (tcon_ids[i] == id)
			break;

	if (i == ARRAY_SIZE(tcon_ids))
		return ERR_PTR(-EINVAL);

	tcon = dpu->tcon_priv[i];

	mutex_lock(&tcon->mutex);

	if (tcon->inuse) {
		mutex_unlock(&tcon->mutex);
		return ERR_PTR(-EBUSY);
	}

	tcon->inuse = true;

	mutex_unlock(&tcon->mutex);

	return tcon;
}
EXPORT_SYMBOL_GPL(dpu_tcon_get);

/* Release a TCON previously acquired with dpu_tcon_get(). */
void dpu_tcon_put(struct dpu_tcon *tcon)
{
	mutex_lock(&tcon->mutex);

	tcon->inuse = false;

	mutex_unlock(&tcon->mutex);
}
+EXPORT_SYMBOL_GPL(dpu_tcon_put); + +struct dpu_tcon *dpu_aux_tcon_peek(struct dpu_tcon *tcon) +{ + return tcon->dpu->tcon_priv[tcon->id ^ 1]; +} +EXPORT_SYMBOL_GPL(dpu_aux_tcon_peek); + +void _dpu_tcon_init(struct dpu_soc *dpu, unsigned int id) +{ +} + +int dpu_tcon_init(struct dpu_soc *dpu, unsigned int id, + unsigned long unused, unsigned long base) +{ + struct dpu_tcon *tcon; + + tcon = devm_kzalloc(dpu->dev, sizeof(*tcon), GFP_KERNEL); + if (!tcon) + return -ENOMEM; + + dpu->tcon_priv[id] = tcon; + + tcon->base = devm_ioremap(dpu->dev, base, SZ_512); + if (!tcon->base) + return -ENOMEM; + + tcon->dpu = dpu; + mutex_init(&tcon->mutex); + + return 0; +} + +void tcon_get_pc(struct dpu_tcon *tcon, void *data) +{ + if (WARN_ON(!tcon)) + return; + + tcon->pc = data; +} diff --git a/drivers/gpu/imx/dpu/dpu-vscaler.c b/drivers/gpu/imx/dpu/dpu-vscaler.c new file mode 100644 index 0000000000000000000000000000000000000000..37ddeb6e5d7bbc1f81976cc0c281b203cc488503 --- /dev/null +++ b/drivers/gpu/imx/dpu/dpu-vscaler.c @@ -0,0 +1,447 @@ +/* + * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <video/dpu.h> +#include "dpu-prv.h" + +#define PIXENGCFG_DYNAMIC 0x8 +#define PIXENGCFG_DYNAMIC_SRC_SEL_MASK 0x3F + +#define SETUP1 0xC +#define SCALE_FACTOR_MASK 0xFFFFF +#define SCALE_FACTOR(n) ((n) & 0xFFFFF) +#define SETUP2 0x10 +#define SETUP3 0x14 +#define SETUP4 0x18 +#define SETUP5 0x1C +#define PHASE_OFFSET_MASK 0x1FFFFF +#define PHASE_OFFSET(n) ((n) & 0x1FFFFF) +#define CONTROL 0x20 +#define OUTPUT_SIZE_MASK 0x3FFF0000 +#define OUTPUT_SIZE(n) ((((n) - 1) << 16) & OUTPUT_SIZE_MASK) +#define FIELD_MODE 0x3000 +#define FILTER_MODE 0x100 +#define SCALE_MODE 0x10 +#define MODE 0x1 + +static const vs_src_sel_t src_sels[3][6] = { + { + VS_SRC_SEL__DISABLE, + VS_SRC_SEL__EXTSRC4, + VS_SRC_SEL__FETCHDECODE0, + VS_SRC_SEL__FETCHDECODE2, + VS_SRC_SEL__MATRIX4, + VS_SRC_SEL__HSCALER4, + }, { + VS_SRC_SEL__DISABLE, + VS_SRC_SEL__EXTSRC5, + VS_SRC_SEL__FETCHDECODE1, + VS_SRC_SEL__FETCHDECODE3, + VS_SRC_SEL__MATRIX5, + VS_SRC_SEL__HSCALER5, + }, { + VS_SRC_SEL__DISABLE, + VS_SRC_SEL__MATRIX9, + VS_SRC_SEL__HSCALER9, + }, +}; + +struct dpu_vscaler { + void __iomem *pec_base; + void __iomem *base; + struct mutex mutex; + int id; + bool inuse; + struct dpu_soc *dpu; + /* see DPU_PLANE_SRC_xxx */ + unsigned int stream_id; +}; + +static inline u32 dpu_pec_vs_read(struct dpu_vscaler *vs, + unsigned int offset) +{ + return readl(vs->pec_base + offset); +} + +static inline void dpu_pec_vs_write(struct dpu_vscaler *vs, u32 value, + unsigned int offset) +{ + writel(value, vs->pec_base + offset); +} + +static inline u32 dpu_vs_read(struct dpu_vscaler *vs, unsigned int offset) +{ + return readl(vs->base + offset); +} + +static inline void dpu_vs_write(struct dpu_vscaler *vs, u32 value, + unsigned int offset) +{ + writel(value, vs->base + offset); +} + +int vscaler_pixengcfg_dynamic_src_sel(struct dpu_vscaler *vs, vs_src_sel_t 
src) +{ + struct dpu_soc *dpu = vs->dpu; + const unsigned int *block_id_map = dpu->devtype->sw2hw_block_id_map; + const unsigned int vs_id_array[] = {4, 5, 9}; + int i, j; + u32 val, mapped_src; + + for (i = 0; i < ARRAY_SIZE(vs_id_array); i++) + if (vs_id_array[i] == vs->id) + break; + + if (WARN_ON(i == ARRAY_SIZE(vs_id_array))) + return -EINVAL; + + mutex_lock(&vs->mutex); + for (j = 0; j < ARRAY_SIZE(src_sels[0]); j++) { + if (src_sels[i][j] == src) { + mapped_src = block_id_map ? block_id_map[src] : src; + if (WARN_ON(mapped_src == NA)) + return -EINVAL; + + val = dpu_pec_vs_read(vs, PIXENGCFG_DYNAMIC); + val &= ~PIXENGCFG_DYNAMIC_SRC_SEL_MASK; + val |= mapped_src; + dpu_pec_vs_write(vs, val, PIXENGCFG_DYNAMIC); + mutex_unlock(&vs->mutex); + return 0; + } + } + mutex_unlock(&vs->mutex); + + dev_err(dpu->dev, "Invalid source for VScaler%d\n", vs->id); + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(vscaler_pixengcfg_dynamic_src_sel); + +void vscaler_pixengcfg_clken(struct dpu_vscaler *vs, pixengcfg_clken_t clken) +{ + u32 val; + + mutex_lock(&vs->mutex); + val = dpu_pec_vs_read(vs, PIXENGCFG_DYNAMIC); + val &= ~CLKEN_MASK; + val |= clken << CLKEN_MASK_SHIFT; + dpu_pec_vs_write(vs, val, PIXENGCFG_DYNAMIC); + mutex_unlock(&vs->mutex); +} +EXPORT_SYMBOL_GPL(vscaler_pixengcfg_clken); + +void vscaler_shden(struct dpu_vscaler *vs, bool enable) +{ + u32 val; + + mutex_lock(&vs->mutex); + val = dpu_vs_read(vs, STATICCONTROL); + if (enable) + val |= SHDEN; + else + val &= ~SHDEN; + dpu_vs_write(vs, val, STATICCONTROL); + mutex_unlock(&vs->mutex); +} +EXPORT_SYMBOL_GPL(vscaler_shden); + +void vscaler_setup1(struct dpu_vscaler *vs, u32 src, u32 dst, bool deinterlace) +{ + struct dpu_soc *dpu = vs->dpu; + u32 scale_factor; + u64 tmp64; + + if (deinterlace) + dst *= 2; + + if (src == dst) { + scale_factor = 0x80000; + } else { + if (src > dst) { + tmp64 = (u64)((u64)dst * 0x80000); + do_div(tmp64, src); + + } else { + tmp64 = (u64)((u64)src * 0x80000); + do_div(tmp64, dst); + } + 
scale_factor = (u32)tmp64; + } + + WARN_ON(scale_factor > 0x80000); + + mutex_lock(&vs->mutex); + dpu_vs_write(vs, SCALE_FACTOR(scale_factor), SETUP1); + mutex_unlock(&vs->mutex); + + dev_dbg(dpu->dev, "Vscaler%d scale factor 0x%08x\n", + vs->id, scale_factor); +} +EXPORT_SYMBOL_GPL(vscaler_setup1); + +void vscaler_setup2(struct dpu_vscaler *vs, bool deinterlace) +{ + /* 0x20000: +0.25 phase offset for deinterlace */ + u32 phase_offset = deinterlace ? 0x20000 : 0; + + mutex_lock(&vs->mutex); + dpu_vs_write(vs, PHASE_OFFSET(phase_offset), SETUP2); + mutex_unlock(&vs->mutex); +} +EXPORT_SYMBOL_GPL(vscaler_setup2); + +void vscaler_setup3(struct dpu_vscaler *vs, bool deinterlace) +{ + /* 0x1e0000: -0.25 phase offset for deinterlace */ + u32 phase_offset = deinterlace ? 0x1e0000 : 0; + + mutex_lock(&vs->mutex); + dpu_vs_write(vs, PHASE_OFFSET(phase_offset), SETUP3); + mutex_unlock(&vs->mutex); +} +EXPORT_SYMBOL_GPL(vscaler_setup3); + +void vscaler_setup4(struct dpu_vscaler *vs, u32 phase_offset) +{ + mutex_lock(&vs->mutex); + dpu_vs_write(vs, PHASE_OFFSET(phase_offset), SETUP4); + mutex_unlock(&vs->mutex); +} +EXPORT_SYMBOL_GPL(vscaler_setup4); + +void vscaler_setup5(struct dpu_vscaler *vs, u32 phase_offset) +{ + mutex_lock(&vs->mutex); + dpu_vs_write(vs, PHASE_OFFSET(phase_offset), SETUP5); + mutex_unlock(&vs->mutex); +} +EXPORT_SYMBOL_GPL(vscaler_setup5); + +void vscaler_output_size(struct dpu_vscaler *vs, u32 line_num) +{ + u32 val; + + mutex_lock(&vs->mutex); + val = dpu_vs_read(vs, CONTROL); + val &= ~OUTPUT_SIZE_MASK; + val |= OUTPUT_SIZE(line_num); + dpu_vs_write(vs, val, CONTROL); + mutex_unlock(&vs->mutex); +} +EXPORT_SYMBOL_GPL(vscaler_output_size); + +void vscaler_field_mode(struct dpu_vscaler *vs, scaler_field_mode_t m) +{ + u32 val; + + mutex_lock(&vs->mutex); + val = dpu_vs_read(vs, CONTROL); + val &= ~FIELD_MODE; + val |= m; + dpu_vs_write(vs, val, CONTROL); + mutex_unlock(&vs->mutex); +} +EXPORT_SYMBOL_GPL(vscaler_field_mode); + +void 
vscaler_filter_mode(struct dpu_vscaler *vs, scaler_filter_mode_t m) +{ + u32 val; + + mutex_lock(&vs->mutex); + val = dpu_vs_read(vs, CONTROL); + val &= ~FILTER_MODE; + val |= m; + dpu_vs_write(vs, val, CONTROL); + mutex_unlock(&vs->mutex); +} +EXPORT_SYMBOL_GPL(vscaler_filter_mode); + +void vscaler_scale_mode(struct dpu_vscaler *vs, scaler_scale_mode_t m) +{ + u32 val; + + mutex_lock(&vs->mutex); + val = dpu_vs_read(vs, CONTROL); + val &= ~SCALE_MODE; + val |= m; + dpu_vs_write(vs, val, CONTROL); + mutex_unlock(&vs->mutex); +} +EXPORT_SYMBOL_GPL(vscaler_scale_mode); + +void vscaler_mode(struct dpu_vscaler *vs, scaler_mode_t m) +{ + u32 val; + + mutex_lock(&vs->mutex); + val = dpu_vs_read(vs, CONTROL); + val &= ~MODE; + val |= m; + dpu_vs_write(vs, val, CONTROL); + mutex_unlock(&vs->mutex); +} +EXPORT_SYMBOL_GPL(vscaler_mode); + +bool vscaler_is_enabled(struct dpu_vscaler *vs) +{ + u32 val; + + mutex_lock(&vs->mutex); + val = dpu_vs_read(vs, CONTROL); + mutex_unlock(&vs->mutex); + + return (val & MODE) == SCALER_ACTIVE; +} +EXPORT_SYMBOL_GPL(vscaler_is_enabled); + +dpu_block_id_t vscaler_get_block_id(struct dpu_vscaler *vs) +{ + switch (vs->id) { + case 4: + return ID_VSCALER4; + case 5: + return ID_VSCALER5; + case 9: + return ID_VSCALER9; + default: + WARN_ON(1); + } + + return ID_NONE; +} +EXPORT_SYMBOL_GPL(vscaler_get_block_id); + +unsigned int vscaler_get_stream_id(struct dpu_vscaler *vs) +{ + return vs->stream_id; +} +EXPORT_SYMBOL_GPL(vscaler_get_stream_id); + +void vscaler_set_stream_id(struct dpu_vscaler *vs, unsigned int id) +{ + switch (id) { + case DPU_PLANE_SRC_TO_DISP_STREAM0: + case DPU_PLANE_SRC_TO_DISP_STREAM1: + case DPU_PLANE_SRC_DISABLED: + vs->stream_id = id; + break; + default: + WARN_ON(1); + } +} +EXPORT_SYMBOL_GPL(vscaler_set_stream_id); + +struct dpu_vscaler *dpu_vs_get(struct dpu_soc *dpu, int id) +{ + struct dpu_vscaler *vs; + int i; + + for (i = 0; i < ARRAY_SIZE(vs_ids); i++) + if (vs_ids[i] == id) + break; + + if (i == 
ARRAY_SIZE(vs_ids)) + return ERR_PTR(-EINVAL); + + vs = dpu->vs_priv[i]; + + mutex_lock(&vs->mutex); + + if (vs->inuse) { + mutex_unlock(&vs->mutex); + return ERR_PTR(-EBUSY); + } + + vs->inuse = true; + + mutex_unlock(&vs->mutex); + + return vs; +} +EXPORT_SYMBOL_GPL(dpu_vs_get); + +void dpu_vs_put(struct dpu_vscaler *vs) +{ + mutex_lock(&vs->mutex); + + vs->inuse = false; + + mutex_unlock(&vs->mutex); +} +EXPORT_SYMBOL_GPL(dpu_vs_put); + +void _dpu_vs_init(struct dpu_soc *dpu, unsigned int id) +{ + struct dpu_vscaler *vs; + int i; + + for (i = 0; i < ARRAY_SIZE(vs_ids); i++) + if (vs_ids[i] == id) + break; + + if (WARN_ON(i == ARRAY_SIZE(vs_ids))) + return; + + vs = dpu->vs_priv[i]; + + vscaler_shden(vs, true); + vscaler_setup2(vs, false); + vscaler_setup3(vs, false); + vscaler_setup4(vs, 0); + vscaler_setup5(vs, 0); + vscaler_pixengcfg_dynamic_src_sel(vs, VS_SRC_SEL__DISABLE); +} + +int dpu_vs_init(struct dpu_soc *dpu, unsigned int id, + unsigned long pec_base, unsigned long base) +{ + struct dpu_vscaler *vs; + int i; + + vs = devm_kzalloc(dpu->dev, sizeof(*vs), GFP_KERNEL); + if (!vs) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(vs_ids); i++) + if (vs_ids[i] == id) + break; + + if (i == ARRAY_SIZE(vs_ids)) + return -EINVAL; + + dpu->vs_priv[i] = vs; + + vs->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_8); + if (!vs->pec_base) + return -ENOMEM; + + vs->base = devm_ioremap(dpu->dev, base, SZ_1K); + if (!vs->base) + return -ENOMEM; + + vs->dpu = dpu; + vs->id = id; + + mutex_init(&vs->mutex); + + _dpu_vs_init(dpu, id); + + return 0; +} diff --git a/include/video/dpu.h b/include/video/dpu.h new file mode 100644 index 0000000000000000000000000000000000000000..ffa3e5a69d148ead9b031b7ca23e0d3f324011d5 --- /dev/null +++ b/include/video/dpu.h @@ -0,0 +1,847 @@ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. 
+ * Copyright 2017-2019 NXP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#ifndef __DRM_DPU_H__ +#define __DRM_DPU_H__ + +#include <drm/drm_crtc.h> +#include <drm/drm_modes.h> +#include <video/imx8-prefetch.h> +#include <video/videomode.h> + +struct dpu_soc; + +enum dpu_irq { + IRQ_STORE9_SHDLOAD = 0, + IRQ_STORE9_FRAMECOMPLETE = 1, + IRQ_STORE9_SEQCOMPLETE = 2, + IRQ_EXTDST0_SHDLOAD = 3, + IRQ_EXTDST0_FRAMECOMPLETE = 4, + IRQ_EXTDST0_SEQCOMPLETE = 5, + IRQ_EXTDST4_SHDLOAD = 6, + IRQ_EXTDST4_FRAMECOMPLETE = 7, + IRQ_EXTDST4_SEQCOMPLETE = 8, + IRQ_EXTDST1_SHDLOAD = 9, + IRQ_EXTDST1_FRAMECOMPLETE = 10, + IRQ_EXTDST1_SEQCOMPLETE = 11, + IRQ_EXTDST5_SHDLOAD = 12, + IRQ_EXTDST5_FRAMECOMPLETE = 13, + IRQ_EXTDST5_SEQCOMPLETE = 14, + IRQ_STORE4_SHDLOAD = 15, + IRQ_STORE4_FRAMECOMPLETE = 16, + IRQ_STORE4_SEQCOMPLETE = 17, + IRQ_STORE5_SHDLOAD = 18, + IRQ_STORE5_FRAMECOMPLETE = 19, + IRQ_STORE5_SEQCOMPLETE = 20, + IRQ_RESERVED21 = 21, + IRQ_HISTOGRAM4_VALID = 22, + IRQ_RESERVED23 = 23, + IRQ_HISTOGRAM5_VALID = 24, + IRQ_FRAMEDUMP0_ERROR = 25, + IRQ_FRAMEDUMP1_ERROR = 26, + IRQ_DISENGCFG_SHDLOAD0 = 27, + IRQ_DISENGCFG_FRAMECOMPLETE0 = 28, + IRQ_DISENGCFG_SEQCOMPLETE0 = 29, + IRQ_FRAMEGEN0_INT0 = 30, + IRQ_FRAMEGEN0_INT1 = 31, + IRQ_FRAMEGEN0_INT2 = 32, + IRQ_FRAMEGEN0_INT3 = 33, + IRQ_SIG0_SHDLOAD = 34, + IRQ_SIG0_VALID = 35, + IRQ_SIG0_ERROR = 36, + IRQ_DISENGCFG_SHDLOAD1 = 37, + IRQ_DISENGCFG_FRAMECOMPLETE1 = 38, + IRQ_DISENGCFG_SEQCOMPLETE1 = 39, + IRQ_FRAMEGEN1_INT0 = 40, + IRQ_FRAMEGEN1_INT1 = 41, + 
IRQ_FRAMEGEN1_INT2 = 42, + IRQ_FRAMEGEN1_INT3 = 43, + IRQ_SIG1_SHDLOAD = 44, + IRQ_SIG1_VALID = 45, + IRQ_SIG1_ERROR = 46, + IRQ_ITUIFC4_ERROR = 47, + IRQ_ITUIFC5_ERROR = 48, + IRQ_RESERVED49 = 49, + IRQ_CMDSEQ_ERROR = 50, + IRQ_COMCTRL_SW0 = 51, + IRQ_COMCTRL_SW1 = 52, + IRQ_COMCTRL_SW2 = 53, + IRQ_COMCTRL_SW3 = 54, + IRQ_FRAMEGEN0_PRIMSYNC_ON = 55, + IRQ_FRAMEGEN0_PRIMSYNC_OFF = 56, + IRQ_FRAMEGEN0_SECSYNC_ON = 57, + IRQ_FRAMEGEN0_SECSYNC_OFF = 58, + IRQ_FRAMEGEN1_PRIMSYNC_ON = 59, + IRQ_FRAMEGEN1_PRIMSYNC_OFF = 60, + IRQ_FRAMEGEN1_SECSYNC_ON = 61, + IRQ_FRAMEGEN1_SECSYNC_OFF = 62, + IRQ_FRAMECAP4_SYNC_ON = 63, + IRQ_FRAMECAP4_SYNC_OFF = 64, + IRQ_CMD = 65, + IRQ_FRAMECAP5_SYNC_OFF = 66, +}; + +typedef enum { + ID_NONE = 0x00, /* 0 */ + /* pixel engines */ + ID_FETCHDECODE9 = 0x01, /* 1 */ + ID_FETCHPERSP9 = 0x02, /* 2 */ + ID_FETCHECO9 = 0x03, /* 3 */ + ID_ROP9 = 0x04, /* 4 */ + ID_CLUT9 = 0x05, /* 5 */ + ID_MATRIX9 = 0x06, /* 6 */ + ID_HSCALER9 = 0x07, /* 7 */ + ID_VSCALER9 = 0x08, /* 8 */ + ID_FILTER9 = 0x09, /* 9 */ + ID_BLITBLEND9 = 0x0A, /* 10 */ + ID_STORE9 = 0x0B, /* 11 */ + ID_CONSTFRAME0 = 0x0C, /* 12 */ + ID_EXTDST0 = 0x0D, /* 13 */ + ID_CONSTFRAME4 = 0x0E, /* 14 */ + ID_EXTDST4 = 0x0F, /* 15 */ + ID_CONSTFRAME1 = 0x10, /* 16 */ + ID_EXTDST1 = 0x11, /* 17 */ + ID_CONSTFRAME5 = 0x12, /* 18 */ + ID_EXTDST5 = 0x13, /* 19 */ + ID_EXTSRC4 = 0x14, /* 20 */ + ID_STORE4 = 0x15, /* 21 */ + ID_EXTSRC5 = 0x16, /* 22 */ + ID_STORE5 = 0x17, /* 23 */ + ID_FETCHDECODE2 = 0x18, /* 24 */ + ID_FETCHDECODE3 = 0x19, /* 25 */ + ID_FETCHWARP2 = 0x1A, /* 26 */ + ID_FETCHECO2 = 0x1B, /* 27 */ + ID_FETCHDECODE0 = 0x1C, /* 28 */ + ID_FETCHECO0 = 0x1D, /* 29 */ + ID_FETCHDECODE1 = 0x1E, /* 30 */ + ID_FETCHECO1 = 0x1F, /* 31 */ + ID_FETCHLAYER0 = 0x20, /* 32 */ + ID_FETCHLAYER1 = 0x21, /* 33 */ + ID_GAMMACOR4 = 0x22, /* 34 */ + ID_MATRIX4 = 0x23, /* 35 */ + ID_HSCALER4 = 0x24, /* 36 */ + ID_VSCALER4 = 0x25, /* 37 */ + ID_HISTOGRAM4 = 0x26, /* 38 */ + ID_GAMMACOR5 = 0x27, /* 39 */ 
+ ID_MATRIX5 = 0x28, /* 40 */ + ID_HSCALER5 = 0x29, /* 41 */ + ID_VSCALER5 = 0x2A, /* 42 */ + ID_HISTOGRAM5 = 0x2B, /* 43 */ + ID_LAYERBLEND0 = 0x2C, /* 44 */ + ID_LAYERBLEND1 = 0x2D, /* 45 */ + ID_LAYERBLEND2 = 0x2E, /* 46 */ + ID_LAYERBLEND3 = 0x2F, /* 47 */ + ID_LAYERBLEND4 = 0x30, /* 48 */ + ID_LAYERBLEND5 = 0x31, /* 49 */ + ID_LAYERBLEND6 = 0x32, /* 50 */ + ID_EXTSRC0 = 0x33, /* 51 */ + ID_EXTSRC1 = 0x34, /* 52 */ + /* display engines */ + ID_DISENGCFG = 0x35, /* 53 */ + ID_FRAMEGEN0 = 0x36, /* 54 */ + ID_MATRIX0 = 0x37, /* 55 */ + ID_GAMMACOR0 = 0x38, /* 56 */ + ID_DITHER0 = 0x39, /* 57 */ + ID_TCON0 = 0x3A, /* 58 */ + ID_SIG0 = 0x3B, /* 59 */ + ID_FRAMEGEN1 = 0x3C, /* 60 */ + ID_MATRIX1 = 0x3D, /* 61 */ + ID_GAMMACOR1 = 0x3E, /* 62 */ + ID_DITHER1 = 0x3F, /* 63 */ + ID_TCON1 = 0x40, /* 64 */ + ID_SIG1 = 0x41, /* 65 */ + ID_FRAMECAP4 = 0x42, /* 66 */ + ID_FRAMECAP5 = 0x43, /* 67 */ +} dpu_block_id_t; + +typedef enum { + ED_SRC_DISABLE = ID_NONE, + ED_SRC_BLITBLEND9 = ID_BLITBLEND9, + ED_SRC_CONSTFRAME0 = ID_CONSTFRAME0, + ED_SRC_CONSTFRAME1 = ID_CONSTFRAME1, + ED_SRC_CONSTFRAME4 = ID_CONSTFRAME4, + ED_SRC_CONSTFRAME5 = ID_CONSTFRAME5, + ED_SRC_MATRIX4 = ID_MATRIX4, + ED_SRC_HSCALER4 = ID_HSCALER4, + ED_SRC_VSCALER4 = ID_VSCALER4, + /* content stream(extdst 0/1) only */ + ED_SRC_EXTSRC4 = ID_EXTSRC4, + ED_SRC_MATRIX5 = ID_MATRIX5, + ED_SRC_HSCALER5 = ID_HSCALER5, + ED_SRC_VSCALER5 = ID_VSCALER5, + /* content stream(extdst 0/1) only */ + ED_SRC_EXTSRC5 = ID_EXTSRC5, + ED_SRC_LAYERBLEND6 = ID_LAYERBLEND6, + ED_SRC_LAYERBLEND5 = ID_LAYERBLEND5, + ED_SRC_LAYERBLEND4 = ID_LAYERBLEND4, + ED_SRC_LAYERBLEND3 = ID_LAYERBLEND3, + ED_SRC_LAYERBLEND2 = ID_LAYERBLEND2, + ED_SRC_LAYERBLEND1 = ID_LAYERBLEND1, + ED_SRC_LAYERBLEND0 = ID_LAYERBLEND0, +} extdst_src_sel_t; + +typedef enum { + SINGLE, /* Reconfig pipeline after explicit trigger */ + AUTO, /* Reconfig pipeline after every kick when idle */ +} ed_sync_mode_t; + +typedef enum { + PSTATUS_EMPTY, + PSTATUS_RUNNING, + 
PSTATUS_RUNNING_RETRIGGERED, + PSTATUS_RESERVED +} ed_pipeline_status_t; + +typedef enum { + SOFTWARE = 0, /* kick generation by KICK field only */ + EXTERNAL = BIT(8), /* kick signal from external allowed */ +} ed_kick_mode_t; + +typedef enum { + SHLDREQID_FETCHDECODE9 = BIT(1), + SHLDREQID_FETCHPERSP9 = BIT(2), + SHLDREQID_FETCHECO9 = BIT(3), + SHLDREQID_CONSTFRAME0 = BIT(4), + SHLDREQID_CONSTFRAME4 = BIT(5), + SHLDREQID_CONSTFRAME1 = BIT(6), + SHLDREQID_CONSTFRAME5 = BIT(7), + SHLDREQID_EXTSRC4 = BIT(8), + SHLDREQID_EXTSRC5 = BIT(9), + SHLDREQID_FETCHDECODE2 = BIT(10), + SHLDREQID_FETCHDECODE3 = BIT(11), + SHLDREQID_FETCHWARP2 = BIT(12), + SHLDREQID_FETCHECO2 = BIT(13), + SHLDREQID_FETCHDECODE0 = BIT(14), + SHLDREQID_FETCHECO0 = BIT(15), + SHLDREQID_FETCHDECODE1 = BIT(16), + SHLDREQID_FETCHECO1 = BIT(17), + SHLDREQID_FETCHLAYER0 = BIT(18), + SHLDREQID_FETCHLAYER1 = BIT(19), + SHLDREQID_EXTSRC0 = BIT(20), + SHLDREQID_EXTSRC1 = BIT(21), +} shadow_load_req_t; + +typedef enum { + PIXENGCFG_STATUS_SEL_DISABLE, + PIXENGCFG_STATUS_SEL_STORE9, + PIXENGCFG_STATUS_SEL_EXTDST0, + PIXENGCFG_STATUS_SEL_EXTDST4, + PIXENGCFG_STATUS_SEL_EXTDST1, + PIXENGCFG_STATUS_SEL_EXTDST5, + PIXENGCFG_STATUS_SEL_STORE4, + PIXENGCFG_STATUS_SEL_STORE5, +} pixengcfg_status_sel_t; + +typedef enum { + FD_SRC_DISABLE = ID_NONE, + FD_SRC_FETCHECO0 = ID_FETCHECO0, + FD_SRC_FETCHECO1 = ID_FETCHECO1, + FD_SRC_FETCHECO2 = ID_FETCHECO2, + FD_SRC_FETCHDECODE0 = ID_FETCHDECODE0, + FD_SRC_FETCHDECODE1 = ID_FETCHDECODE1, + FD_SRC_FETCHDECODE2 = ID_FETCHDECODE2, + FD_SRC_FETCHDECODE3 = ID_FETCHDECODE3, + FD_SRC_FETCHWARP2 = ID_FETCHWARP2, +} fd_dynamic_src_sel_t; + +typedef enum { + /* RL and RLAD decoder */ + FETCHTYPE__DECODE, + /* fractional plane(8 layers) */ + FETCHTYPE__LAYER, + /* arbitrary warping and fractional plane(8 layers) */ + FETCHTYPE__WARP, + /* minimum feature set for alpha, chroma and coordinate planes */ + FETCHTYPE__ECO, + /* affine, perspective and arbitrary warping */ + 
FETCHTYPE__PERSP, + /* affine and arbitrary warping */ + FETCHTYPE__ROT, + /* RL and RLAD decoder, reduced feature set */ + FETCHTYPE__DECODEL, + /* fractional plane(8 layers), reduced feature set */ + FETCHTYPE__LAYERL, + /* affine and arbitrary warping, reduced feature set */ + FETCHTYPE__ROTL, +} fetchtype_t; + +typedef enum { + /* No side-by-side synchronization. */ + FGSYNCMODE__OFF = 0, + /* Framegen is master. */ + FGSYNCMODE__MASTER = 1 << 1, + /* Runs in cyclic synchronization mode. */ + FGSYNCMODE__SLAVE_CYC = 2 << 1, + /* Runs in one time synchronization mode. */ + FGSYNCMODE__SLAVE_ONCE = 3 << 1, +} fgsyncmode_t; + +typedef enum { + FGDM__BLACK, + /* Constant Color Background is shown. */ + FGDM__CONSTCOL, + FGDM__PRIM, + FGDM__SEC, + FGDM__PRIM_ON_TOP, + FGDM__SEC_ON_TOP, + /* White color background with test pattern is shown. */ + FGDM__TEST, +} fgdm_t; + +typedef enum { + HS_SRC_SEL__DISABLE = ID_NONE, + HS_SRC_SEL__MATRIX9 = ID_MATRIX9, + HS_SRC_SEL__VSCALER9 = ID_VSCALER9, + HS_SRC_SEL__FILTER9 = ID_FILTER9, + HS_SRC_SEL__EXTSRC4 = ID_EXTSRC4, + HS_SRC_SEL__EXTSRC5 = ID_EXTSRC5, + HS_SRC_SEL__FETCHDECODE2 = ID_FETCHDECODE2, + HS_SRC_SEL__FETCHDECODE3 = ID_FETCHDECODE3, + HS_SRC_SEL__FETCHDECODE0 = ID_FETCHDECODE0, + HS_SRC_SEL__FETCHDECODE1 = ID_FETCHDECODE1, + HS_SRC_SEL__MATRIX4 = ID_MATRIX4, + HS_SRC_SEL__VSCALER4 = ID_VSCALER4, + HS_SRC_SEL__MATRIX5 = ID_MATRIX5, + HS_SRC_SEL__VSCALER5 = ID_VSCALER5, +} hs_src_sel_t; + +typedef enum { + /* common options */ + LB_PRIM_SEL__DISABLE = ID_NONE, + LB_PRIM_SEL__BLITBLEND9 = ID_BLITBLEND9, + LB_PRIM_SEL__CONSTFRAME0 = ID_CONSTFRAME0, + LB_PRIM_SEL__CONSTFRAME1 = ID_CONSTFRAME1, + LB_PRIM_SEL__CONSTFRAME4 = ID_CONSTFRAME4, + LB_PRIM_SEL__CONSTFRAME5 = ID_CONSTFRAME5, + LB_PRIM_SEL__MATRIX4 = ID_MATRIX4, + LB_PRIM_SEL__HSCALER4 = ID_HSCALER4, + LB_PRIM_SEL__VSCALER4 = ID_VSCALER4, + LB_PRIM_SEL__EXTSRC4 = ID_EXTSRC4, + LB_PRIM_SEL__MATRIX5 = ID_MATRIX5, + LB_PRIM_SEL__HSCALER5 = ID_HSCALER5, + 
LB_PRIM_SEL__VSCALER5 = ID_VSCALER5, + LB_PRIM_SEL__EXTSRC5 = ID_EXTSRC5, + /* + * special options: + * layerblend(n) has n special options, + * from layerblend0 to layerblend(n - 1), e.g., + * layerblend4 has 4 special options - + * layerblend0/1/2/3. + */ + LB_PRIM_SEL__LAYERBLEND5 = ID_LAYERBLEND5, + LB_PRIM_SEL__LAYERBLEND4 = ID_LAYERBLEND4, + LB_PRIM_SEL__LAYERBLEND3 = ID_LAYERBLEND3, + LB_PRIM_SEL__LAYERBLEND2 = ID_LAYERBLEND2, + LB_PRIM_SEL__LAYERBLEND1 = ID_LAYERBLEND1, + LB_PRIM_SEL__LAYERBLEND0 = ID_LAYERBLEND0, +} lb_prim_sel_t; + +typedef enum { + LB_SEC_SEL__DISABLE = ID_NONE, + LB_SEC_SEL__FETCHDECODE2 = ID_FETCHDECODE2, + LB_SEC_SEL__FETCHDECODE3 = ID_FETCHDECODE3, + LB_SEC_SEL__FETCHWARP2 = ID_FETCHWARP2, + LB_SEC_SEL__FETCHDECODE0 = ID_FETCHDECODE0, + LB_SEC_SEL__FETCHDECODE1 = ID_FETCHDECODE1, + LB_SEC_SEL__MATRIX4 = ID_MATRIX4, + LB_SEC_SEL__HSCALER4 = ID_HSCALER4, + LB_SEC_SEL__VSCALER4 = ID_VSCALER4, + LB_SEC_SEL__MATRIX5 = ID_MATRIX5, + LB_SEC_SEL__HSCALER5 = ID_HSCALER5, + LB_SEC_SEL__VSCALER5 = ID_VSCALER5, + LB_SEC_SEL__FETCHLAYER0 = ID_FETCHLAYER0, + LB_SEC_SEL__FETCHLAYER1 = ID_FETCHLAYER1, +} lb_sec_sel_t; + +typedef enum { + PRIMARY, /* background plane */ + SECONDARY, /* foreground plane */ + BOTH, +} lb_shadow_sel_t; + +typedef enum { + LB_NEUTRAL, /* Output is same as primary input. */ + LB_BLEND, +} lb_mode_t; + +typedef enum { + /* Constant 0 indicates frame or top field. */ + SCALER_ALWAYS0 = 0x0, + /* Constant 1 indicates bottom field. */ + SCALER_ALWAYS1 = 0x1 << 12, + /* Output field polarity is taken from input field polarity. */ + SCALER_INPUT = 0x2 << 12, + /* Output field polarity toggles, starting with 0 after reset. 
*/ + SCALER_TOGGLE = 0x3 << 12, +} scaler_field_mode_t; + +typedef enum { + /* pointer-sampling */ + SCALER_NEAREST = 0x0, + /* box filter */ + SCALER_LINEAR = 0x100, +} scaler_filter_mode_t; + +typedef enum { + SCALER_DOWNSCALE = 0x0, + SCALER_UPSCALE = 0x10, +} scaler_scale_mode_t; + +typedef enum { + /* Pixel by-pass the scaler, all other settings are ignored. */ + SCALER_NEUTRAL = 0x0, + /* Scaler is active. */ + SCALER_ACTIVE = 0x1, +} scaler_mode_t; + +typedef enum { + VS_SRC_SEL__DISABLE = ID_NONE, + VS_SRC_SEL__MATRIX9 = ID_MATRIX9, + VS_SRC_SEL__HSCALER9 = ID_HSCALER9, + VS_SRC_SEL__EXTSRC4 = ID_EXTSRC4, + VS_SRC_SEL__EXTSRC5 = ID_EXTSRC5, + VS_SRC_SEL__FETCHDECODE2 = ID_FETCHDECODE2, + VS_SRC_SEL__FETCHDECODE3 = ID_FETCHDECODE3, + VS_SRC_SEL__FETCHDECODE0 = ID_FETCHDECODE0, + VS_SRC_SEL__FETCHDECODE1 = ID_FETCHDECODE1, + VS_SRC_SEL__MATRIX4 = ID_MATRIX4, + VS_SRC_SEL__HSCALER4 = ID_HSCALER4, + VS_SRC_SEL__MATRIX5 = ID_MATRIX5, + VS_SRC_SEL__HSCALER5 = ID_HSCALER5, +} vs_src_sel_t; + +#define CLKEN_MASK (0x3 << 24) +#define CLKEN_MASK_SHIFT 24 +typedef enum { + CLKEN__DISABLE = 0x0, + CLKEN__AUTOMATIC = 0x1, + CLKEN__FULL = 0x3, +} pixengcfg_clken_t; + +/* fetch unit types */ +enum { + FU_T_NA, + FU_T_FD, + FU_T_FE, + FU_T_FL, + FU_T_FW, +}; + +struct dpu_fetchunit; + +struct dpu_fetchunit_ops { + void (*set_burstlength)(struct dpu_fetchunit *fu, + unsigned int x_offset, unsigned int mt_w, + int bpp, dma_addr_t baddr, bool use_prefetch); + + void (*set_baseaddress)(struct dpu_fetchunit *fu, unsigned int width, + unsigned int x_offset, unsigned int y_offset, + unsigned int mt_w, unsigned int mt_h, + int bpp, dma_addr_t baddr); + + void (*set_src_bpp)(struct dpu_fetchunit *fu, int bpp); + + void (*set_src_stride)(struct dpu_fetchunit *fu, + unsigned int width, unsigned int x_offset, + unsigned int mt_w, int bpp, unsigned int stride, + dma_addr_t baddr, bool use_prefetch); + + void (*set_src_buf_dimensions)(struct dpu_fetchunit *fu, + unsigned int w, unsigned 
int h, u32 fmt, + bool deinterlace); + + void (*set_fmt)(struct dpu_fetchunit *fu, u32 fmt, bool deinterlace); + + void (*enable_src_buf)(struct dpu_fetchunit *fu); + void (*disable_src_buf)(struct dpu_fetchunit *fu); + bool (*is_enabled)(struct dpu_fetchunit *fu); + + void (*set_framedimensions)(struct dpu_fetchunit *fu, + unsigned int w, unsigned int h, + bool deinterlace); + + void (*set_controltrigger)(struct dpu_fetchunit *fu); + + unsigned int (*get_stream_id)(struct dpu_fetchunit *fu); + void (*set_stream_id)(struct dpu_fetchunit *fu, unsigned int id); + + void (*pin_off)(struct dpu_fetchunit *fu); + void (*unpin_off)(struct dpu_fetchunit *fu); + bool (*is_pinned_off)(struct dpu_fetchunit *fu); +}; + +struct dpu_fetchunit { + void __iomem *pec_base; + void __iomem *base; + char *name; + struct mutex mutex; + int id; + int sub_id; /* for fractional fetch units */ + int type; + bool inuse; + struct dpu_soc *dpu; + /* see DPU_PLANE_SRC_xxx */ + unsigned int stream_id; + bool pin_off; + struct dprc *dprc; + const struct dpu_fetchunit_ops *ops; +}; + +int dpu_map_inner_irq(struct dpu_soc *dpu, int irq); + +/* Constant Frame Unit */ +struct dpu_constframe; +void constframe_shden(struct dpu_constframe *cf, bool enable); +void constframe_framedimensions(struct dpu_constframe *cf, unsigned int w, + unsigned int h); +void constframe_framedimensions_copy_prim(struct dpu_constframe *cf); +void constframe_constantcolor(struct dpu_constframe *cf, unsigned int r, + unsigned int g, unsigned int b, unsigned int a); +void constframe_controltrigger(struct dpu_constframe *cf, bool trigger); +shadow_load_req_t constframe_to_shdldreq_t(struct dpu_constframe *cf); +struct dpu_constframe *dpu_cf_get(struct dpu_soc *dpu, int id); +void dpu_cf_put(struct dpu_constframe *cf); +struct dpu_constframe *dpu_aux_cf_peek(struct dpu_constframe *cf); + +/* Display Engine Configuration Unit */ +struct dpu_disengcfg; +void disengcfg_polarity_ctrl(struct dpu_disengcfg *dec, unsigned int flags); 
+struct dpu_disengcfg *dpu_dec_get(struct dpu_soc *dpu, int id); +void dpu_dec_put(struct dpu_disengcfg *dec); +struct dpu_disengcfg *dpu_aux_dec_peek(struct dpu_disengcfg *dec); + +/* External Destination Unit */ +struct dpu_extdst; +void extdst_pixengcfg_shden(struct dpu_extdst *ed, bool enable); +void extdst_pixengcfg_powerdown(struct dpu_extdst *ed, bool powerdown); +void extdst_pixengcfg_sync_mode(struct dpu_extdst *ed, ed_sync_mode_t mode); +void extdst_pixengcfg_reset(struct dpu_extdst *ed, bool reset); +void extdst_pixengcfg_div(struct dpu_extdst *ed, u16 div); +void extdst_pixengcfg_syncmode_master(struct dpu_extdst *ed, bool enable); +int extdst_pixengcfg_src_sel(struct dpu_extdst *ed, extdst_src_sel_t src); +void extdst_pixengcfg_sel_shdldreq(struct dpu_extdst *ed); +void extdst_pixengcfg_shdldreq(struct dpu_extdst *ed, u32 req_mask); +void extdst_pixengcfg_sync_trigger(struct dpu_extdst *ed); +void extdst_pixengcfg_trigger_sequence_complete(struct dpu_extdst *ed); +bool extdst_pixengcfg_is_sync_busy(struct dpu_extdst *ed); +ed_pipeline_status_t extdst_pixengcfg_pipeline_status(struct dpu_extdst *ed); +void extdst_shden(struct dpu_extdst *ed, bool enable); +void extdst_kick_mode(struct dpu_extdst *ed, ed_kick_mode_t mode); +void extdst_perfcountmode(struct dpu_extdst *ed, bool enable); +void extdst_gamma_apply_enable(struct dpu_extdst *ed, bool enable); +void extdst_kick(struct dpu_extdst *ed); +void extdst_cnt_err_clear(struct dpu_extdst *ed); +bool extdst_cnt_err_status(struct dpu_extdst *ed); +u32 extdst_last_control_word(struct dpu_extdst *ed); +void extdst_pixel_cnt(struct dpu_extdst *ed, u16 *x, u16 *y); +void extdst_last_pixel_cnt(struct dpu_extdst *ed, u16 *x, u16 *y); +u32 extdst_perfresult(struct dpu_extdst *ed); +bool extdst_is_master(struct dpu_extdst *ed); +struct dpu_extdst *dpu_ed_get(struct dpu_soc *dpu, int id); +void dpu_ed_put(struct dpu_extdst *ed); +struct dpu_extdst *dpu_aux_ed_peek(struct dpu_extdst *ed); + +/* Fetch Decode Unit */ 
+int fetchdecode_pixengcfg_dynamic_src_sel(struct dpu_fetchunit *fu,
+					  fd_dynamic_src_sel_t src);
+void fetchdecode_layeroffset(struct dpu_fetchunit *fd, unsigned int x,
+			     unsigned int y);
+void fetchdecode_clipoffset(struct dpu_fetchunit *fd, unsigned int x,
+			    unsigned int y);
+void fetchdecode_clipdimensions(struct dpu_fetchunit *fd, unsigned int w,
+				unsigned int h);
+void fetchdecode_rgb_constantcolor(struct dpu_fetchunit *fd,
+				   u8 r, u8 g, u8 b, u8 a);
+void fetchdecode_yuv_constantcolor(struct dpu_fetchunit *fd,
+				   u8 y, u8 u, u8 v);
+int fetchdecode_fetchtype(struct dpu_fetchunit *fd, fetchtype_t *type);
+shadow_load_req_t fetchdecode_to_shdldreq_t(struct dpu_fetchunit *fd);
+u32 fetchdecode_get_vproc_mask(struct dpu_fetchunit *fd);
+bool fetchdecode_need_fetcheco(struct dpu_fetchunit *fd, u32 fmt);
+struct dpu_fetchunit *dpu_fd_get(struct dpu_soc *dpu, int id);
+void dpu_fd_put(struct dpu_fetchunit *fu);
+
+/* Fetch ECO Unit */
+void fetcheco_layeroffset(struct dpu_fetchunit *fu, unsigned int x,
+			  unsigned int y);
+void fetcheco_clipoffset(struct dpu_fetchunit *fu, unsigned int x,
+			 unsigned int y);
+void fetcheco_clipdimensions(struct dpu_fetchunit *fu, unsigned int w,
+			     unsigned int h);
+void fetcheco_frameresampling(struct dpu_fetchunit *fu, unsigned int x,
+			      unsigned int y);
+int fetcheco_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type);
+dpu_block_id_t fetcheco_get_block_id(struct dpu_fetchunit *fu);
+struct dpu_fetchunit *dpu_fe_get(struct dpu_soc *dpu, int id);
+void dpu_fe_put(struct dpu_fetchunit *fu);
+
+/* Fetch Layer Unit */
+void fetchlayer_rgb_constantcolor(struct dpu_fetchunit *fu,
+				  u8 r, u8 g, u8 b, u8 a);
+void fetchlayer_yuv_constantcolor(struct dpu_fetchunit *fu, u8 y, u8 u, u8 v);
+int fetchlayer_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type);
+struct dpu_fetchunit *dpu_fl_get(struct dpu_soc *dpu, int id);
+void dpu_fl_put(struct dpu_fetchunit *fu);
+
+/* Fetch Warp Unit */
+void fetchwarp_rgb_constantcolor(struct dpu_fetchunit *fu,
+				 u8 r, u8 g, u8 b, u8 a);
+void fetchwarp_yuv_constantcolor(struct dpu_fetchunit *fu, u8 y, u8 u, u8 v);
+int fetchwarp_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type);
+struct dpu_fetchunit *dpu_fw_get(struct dpu_soc *dpu, int id);
+void dpu_fw_put(struct dpu_fetchunit *fu);
+
+/* Frame Generator Unit */
+/* opaque handle; the definition is private to the DPU driver core */
+struct dpu_framegen;
+void framegen_enable(struct dpu_framegen *fg);
+void framegen_disable(struct dpu_framegen *fg);
+void framegen_shdtokgen(struct dpu_framegen *fg);
+void framegen_syncmode(struct dpu_framegen *fg, fgsyncmode_t mode);
+void
+framegen_cfg_videomode(struct dpu_framegen *fg,
+		       struct drm_display_mode *m, bool side_by_side,
+		       bool encoder_type_has_tmds, bool encoder_type_has_lvds);
+void framegen_pkickconfig(struct dpu_framegen *fg, bool enable);
+void framegen_syncmode_fixup(struct dpu_framegen *fg, bool enable);
+void framegen_sacfg(struct dpu_framegen *fg, unsigned int x, unsigned int y);
+void framegen_displaymode(struct dpu_framegen *fg, fgdm_t mode);
+void framegen_panic_displaymode(struct dpu_framegen *fg, fgdm_t mode);
+void framegen_wait_done(struct dpu_framegen *fg, struct drm_display_mode *m);
+void framegen_read_timestamp(struct dpu_framegen *fg,
+			     u32 *frame_index, u32 *line_index);
+void framegen_wait_for_frame_counter_moving(struct dpu_framegen *fg);
+bool framegen_secondary_requests_to_read_empty_fifo(struct dpu_framegen *fg);
+void framegen_secondary_clear_channel_status(struct dpu_framegen *fg);
+bool framegen_secondary_is_syncup(struct dpu_framegen *fg);
+void framegen_wait_for_secondary_syncup(struct dpu_framegen *fg);
+void framegen_enable_clock(struct dpu_framegen *fg);
+void framegen_disable_clock(struct dpu_framegen *fg);
+bool framegen_is_master(struct dpu_framegen *fg);
+bool framegen_is_slave(struct dpu_framegen *fg);
+struct dpu_framegen *dpu_fg_get(struct dpu_soc *dpu, int id);
+void dpu_fg_put(struct dpu_framegen *fg);
+struct dpu_framegen *dpu_aux_fg_peek(struct dpu_framegen *fg);
+
+/* Horizontal Scaler Unit */
+struct dpu_hscaler;
+int hscaler_pixengcfg_dynamic_src_sel(struct dpu_hscaler *hs, hs_src_sel_t src);
+void hscaler_pixengcfg_clken(struct dpu_hscaler *hs, pixengcfg_clken_t clken);
+void hscaler_shden(struct dpu_hscaler *hs, bool enable);
+void hscaler_setup1(struct dpu_hscaler *hs, unsigned int src, unsigned int dst);
+void hscaler_setup2(struct dpu_hscaler *hs, u32 phase_offset);
+void hscaler_output_size(struct dpu_hscaler *hs, u32 line_num);
+void hscaler_filter_mode(struct dpu_hscaler *hs, scaler_filter_mode_t m);
+void hscaler_scale_mode(struct dpu_hscaler *hs, scaler_scale_mode_t m);
+void hscaler_mode(struct dpu_hscaler *hs, scaler_mode_t m);
+bool hscaler_is_enabled(struct dpu_hscaler *hs);
+dpu_block_id_t hscaler_get_block_id(struct dpu_hscaler *hs);
+unsigned int hscaler_get_stream_id(struct dpu_hscaler *hs);
+void hscaler_set_stream_id(struct dpu_hscaler *hs, unsigned int id);
+struct dpu_hscaler *dpu_hs_get(struct dpu_soc *dpu, int id);
+void dpu_hs_put(struct dpu_hscaler *hs);
+
+/* Layer Blend Unit */
+struct dpu_layerblend;
+int layerblend_pixengcfg_dynamic_prim_sel(struct dpu_layerblend *lb,
+					  lb_prim_sel_t prim);
+void layerblend_pixengcfg_dynamic_sec_sel(struct dpu_layerblend *lb,
+					  lb_sec_sel_t sec);
+void layerblend_pixengcfg_clken(struct dpu_layerblend *lb,
+				pixengcfg_clken_t clken);
+void layerblend_shden(struct dpu_layerblend *lb, bool enable);
+void layerblend_shdtoksel(struct dpu_layerblend *lb, lb_shadow_sel_t sel);
+void layerblend_shdldsel(struct dpu_layerblend *lb, lb_shadow_sel_t sel);
+void layerblend_control(struct dpu_layerblend *lb, lb_mode_t mode);
+void layerblend_blendcontrol(struct dpu_layerblend *lb, bool sec_from_scaler);
+void layerblend_position(struct dpu_layerblend *lb, int x, int y);
+struct dpu_layerblend *dpu_lb_get(struct dpu_soc *dpu, int id);
+void dpu_lb_put(struct dpu_layerblend *lb);
+
+/* Store Unit */
+struct dpu_store;
+void store_pixengcfg_syncmode_fixup(struct dpu_store *st, bool enable);
+struct dpu_store *dpu_st_get(struct dpu_soc *dpu, int id);
+void dpu_st_put(struct dpu_store *st);
+
+/* Timing Controller Unit */
+struct dpu_tcon;
+int tcon_set_fmt(struct dpu_tcon *tcon, u32 bus_format);
+void tcon_set_operation_mode(struct dpu_tcon *tcon);
+void tcon_cfg_videomode(struct dpu_tcon *tcon,
+			struct drm_display_mode *m, bool side_by_side);
+bool tcon_is_master(struct dpu_tcon *tcon);
+bool tcon_is_slave(struct dpu_tcon *tcon);
+void tcon_configure_pc(struct dpu_tcon *tcon, unsigned int di,
+		       unsigned int frame_width, u32 mode, u32 format);
+void tcon_enable_pc(struct dpu_tcon *tcon);
+void tcon_disable_pc(struct dpu_tcon *tcon);
+struct dpu_tcon *dpu_tcon_get(struct dpu_soc *dpu, int id);
+void dpu_tcon_put(struct dpu_tcon *tcon);
+struct dpu_tcon *dpu_aux_tcon_peek(struct dpu_tcon *tcon);
+
+/* Vertical Scaler Unit */
+struct dpu_vscaler;
+int vscaler_pixengcfg_dynamic_src_sel(struct dpu_vscaler *vs, vs_src_sel_t src);
+void vscaler_pixengcfg_clken(struct dpu_vscaler *vs, pixengcfg_clken_t clken);
+void vscaler_shden(struct dpu_vscaler *vs, bool enable);
+void vscaler_setup1(struct dpu_vscaler *vs, u32 src, u32 dst, bool deinterlace);
+void vscaler_setup2(struct dpu_vscaler *vs, bool deinterlace);
+void vscaler_setup3(struct dpu_vscaler *vs, bool deinterlace);
+void vscaler_setup4(struct dpu_vscaler *vs, u32 phase_offset);
+void vscaler_setup5(struct dpu_vscaler *vs, u32 phase_offset);
+void vscaler_output_size(struct dpu_vscaler *vs, u32 line_num);
+void vscaler_field_mode(struct dpu_vscaler *vs, scaler_field_mode_t m);
+void vscaler_filter_mode(struct dpu_vscaler *vs, scaler_filter_mode_t m);
+void vscaler_scale_mode(struct dpu_vscaler *vs, scaler_scale_mode_t m);
+void vscaler_mode(struct dpu_vscaler *vs, scaler_mode_t m);
+bool vscaler_is_enabled(struct dpu_vscaler *vs);
+dpu_block_id_t vscaler_get_block_id(struct dpu_vscaler *vs);
+unsigned int vscaler_get_stream_id(struct dpu_vscaler *vs);
+void vscaler_set_stream_id(struct dpu_vscaler *vs, unsigned int id);
+struct dpu_vscaler *dpu_vs_get(struct dpu_soc *dpu, int id);
+void dpu_vs_put(struct dpu_vscaler *vs);
+
+/* fetchdecode -> attached video-processing unit lookups */
+struct dpu_fetchunit *fetchdecode_get_fetcheco(struct dpu_fetchunit *fu);
+struct dpu_hscaler *fetchdecode_get_hscaler(struct dpu_fetchunit *fu);
+struct dpu_vscaler *fetchdecode_get_vscaler(struct dpu_fetchunit *fu);
+
+/* per-SoC DPU capability queries */
+bool dpu_has_pc(struct dpu_soc *dpu);
+unsigned int dpu_get_syncmode_min_prate(struct dpu_soc *dpu);
+unsigned int dpu_get_singlemode_max_width(struct dpu_soc *dpu);
+unsigned int dpu_get_master_stream_id(struct dpu_soc *dpu);
+
+/* video-processing capability mask helpers */
+bool dpu_vproc_has_fetcheco_cap(u32 cap_mask);
+bool dpu_vproc_has_hscale_cap(u32 cap_mask);
+bool dpu_vproc_has_vscale_cap(u32 cap_mask);
+
+u32 dpu_vproc_get_fetcheco_cap(u32 cap_mask);
+u32 dpu_vproc_get_hscale_cap(u32 cap_mask);
+u32 dpu_vproc_get_vscale_cap(u32 cap_mask);
+
+/* operations common to all fetch unit variants (decode/eco/layer/warp) */
+void fetchunit_get_dprc(struct dpu_fetchunit *fu, void *data);
+void fetchunit_shden(struct dpu_fetchunit *fu, bool enable);
+void fetchunit_baddr_autoupdate(struct dpu_fetchunit *fu, u8 layer_mask);
+void fetchunit_shdldreq_sticky(struct dpu_fetchunit *fu, u8 layer_mask);
+void fetchunit_set_burstlength(struct dpu_fetchunit *fu,
+			       unsigned int x_offset, unsigned int mt_w,
+			       int bpp, dma_addr_t baddr, bool use_prefetch);
+void fetchunit_set_baseaddress(struct dpu_fetchunit *fu, unsigned int width,
+			       unsigned int x_offset, unsigned int y_offset,
+			       unsigned int mt_w, unsigned int mt_h,
+			       int bpp, dma_addr_t baddr);
+void fetchunit_set_src_bpp(struct dpu_fetchunit *fu, int bpp);
+void fetchunit_set_src_stride(struct dpu_fetchunit *fu,
+			      unsigned int width, unsigned int x_offset,
+			      unsigned int mt_w, int bpp, unsigned int stride,
+			      dma_addr_t baddr, bool use_prefetch);
+void fetchunit_enable_src_buf(struct dpu_fetchunit *fu);
+void fetchunit_disable_src_buf(struct dpu_fetchunit *fu);
+bool fetchunit_is_enabled(struct dpu_fetchunit *fu);
+unsigned int fetchunit_get_stream_id(struct dpu_fetchunit *fu);
+void fetchunit_set_stream_id(struct dpu_fetchunit *fu, unsigned int id);
+void fetchunit_pin_off(struct dpu_fetchunit *fu);
+void fetchunit_unpin_off(struct dpu_fetchunit *fu);
+bool fetchunit_is_pinned_off(struct dpu_fetchunit *fu);
+bool fetchunit_is_fetchdecode(struct dpu_fetchunit *fu);
+bool fetchunit_is_fetcheco(struct dpu_fetchunit *fu);
+bool fetchunit_is_fetchlayer(struct dpu_fetchunit *fu);
+bool fetchunit_is_fetchwarp(struct dpu_fetchunit *fu);
+
+/* dpu blit engine */
+struct dpu_bliteng;
+int dpu_bliteng_init(struct dpu_bliteng *dpu_bliteng);
+void dpu_bliteng_fini(struct dpu_bliteng *dpu_bliteng);
+int dpu_be_get(struct dpu_bliteng *dpu_be);
+void dpu_be_put(struct dpu_bliteng *dpu_be);
+void dpu_be_wait(struct dpu_bliteng *dpu_be);
+int dpu_be_blit(struct dpu_bliteng *dpu_be, u32 *cmdlist,
+		u32 cmdnum);
+int dpu_bliteng_get_empty_instance(struct dpu_bliteng **dpu_be,
+				   struct device *dev);
+u32 *dpu_bliteng_get_cmd_list(struct dpu_bliteng *dpu_be);
+s32 dpu_bliteng_get_id(struct dpu_bliteng *dpu_be);
+void dpu_bliteng_set_id(struct dpu_bliteng *dpu_be, int id);
+void dpu_bliteng_set_dev(struct dpu_bliteng *dpu_be, struct device *dev);
+
+void dpu_be_configure_prefetch(struct dpu_bliteng *dpu_be,
+			       u32 width, u32 height,
+			       u32 x_offset, u32 y_offset,
+			       u32 stride, u32 format, u64 modifier,
+			       u64 baddr, u64 uv_addr);
+
+/*
+ * to avoid on-the-fly/hot plane resource migration
+ * between two display interfaces
+ */
+#define DPU_PLANE_SRC_TO_DISP_STREAM0	BIT(0)
+#define DPU_PLANE_SRC_TO_DISP_STREAM1	BIT(1)
+#define DPU_PLANE_SRC_DISABLED		0
+
+/* sizes of the fetch/layerblend unit arrays in struct dpu_plane_res below */
+#define MAX_FD_NUM	4
+#define MAX_FL_NUM	2
+#define MAX_FW_NUM	1
+#define MAX_LB_NUM	7
+/*
+ * NOTE(review): the [2] arrays presumably hold one unit per display
+ * stream — confirm against the driver implementation.
+ */
+struct dpu_plane_res {
+	struct dpu_constframe *cf[2];
+	struct dpu_extdst *ed[2];
+	struct dpu_fetchunit *fd[MAX_FD_NUM];
+	struct dpu_fetchunit *fe[2];
+	struct dpu_fetchunit *fl[MAX_FL_NUM];
+	struct dpu_fetchunit *fw[MAX_FW_NUM];
+	struct dpu_framegen *fg[2];
+	struct dpu_hscaler *hs[2];
+	struct dpu_layerblend *lb[MAX_LB_NUM];
+	struct dpu_vscaler *vs[2];
+};
+
+/*
+ * Each DPU plane can be a primary plane or an overlay plane
+ * of one of the DPU's two CRTCs.
+ */
+struct dpu_plane_grp {
+	struct dpu_plane_res res;
+	unsigned int hw_plane_num;
+	unsigned int hw_plane_fetcheco_num;
+	unsigned int hw_plane_hscaler_num;
+	unsigned int hw_plane_vscaler_num;
+	unsigned int id;
+	bool has_vproc;
+	/*
+	 * used when assigning plane source
+	 * index:  0         1         2         3    4    5    6
+	 * source: fl0(sub0) fl1(sub0) fw2(sub0) fd0  fd1  fd2  fd3
+	 */
+	struct mutex mutex;	/* protects the source masks below */
+	u32 src_a_mask;
+	u32 src_na_mask;
+	u32 src_use_vproc_mask;
+};
+
+/* map an embedded dpu_plane_res back to its containing dpu_plane_grp */
+static inline struct dpu_plane_grp *plane_res_to_grp(struct dpu_plane_res *res)
+{
+	return container_of(res, struct dpu_plane_grp, res);
+}
+
+struct dpu_client_platformdata {
+	const unsigned int stream_id;
+	unsigned int di_grp_id;
+	struct dpu_plane_grp *plane_grp;
+
+	/* Store9 could be shared between display engine and blit engine */
+	struct dpu_store *st9;
+
+	struct device_node *of_node;
+};
+#endif /* __DRM_DPU_H__ */