diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 0f4e5127a662104886b6f264a3cc8d34d45e018a..1cd1c618ac00bdabf21d60fcb49a91d37b6557c7 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1183,6 +1183,13 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
 	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF);
 }
 
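+/*
+ * All-zero IV shared by AES-ECB requests that arrive without req->info:
+ * allocated and DMA mapped once at module init, unmapped at module exit.
+ */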
+static u8 *ecb_zero_iv;
+static dma_addr_t ecb_ziv_dma;
+
 /*
  * allocate and map the aead extended descriptor
  */
@@ -1486,6 +1493,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	u8 *iv;
 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
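+	/* class 1 algorithm type, used below to detect AES-ECB */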
+	u32 c1_alg_typ = ctx->cdata.algtype;
 
 	src_nents = sg_nents_for_len(req->src, req->nbytes);
 	if (unlikely(src_nents < 0)) {
@@ -1555,13 +1564,25 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
-	memcpy(iv, req->info, ivsize);
+	if (req->info)
+		memcpy(iv, req->info, ivsize);
 
-	iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, iv_dma)) {
-		dev_err(jrdev, "unable to map IV\n");
-		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, DMA_NONE, 0, 0);
-		kfree(edesc);
-		return ERR_PTR(-ENOMEM);
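+	/*
+	 * AES-ECB does not use an IV, so callers may legitimately pass a
+	 * NULL req->info; in that case point iv_dma at the pre-mapped
+	 * all-zero IV instead of mapping anything here.
+	 */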
+	if (!req->info && ivsize &&
+	    (c1_alg_typ & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES &&
+	    (c1_alg_typ & OP_ALG_AAI_MASK) == OP_ALG_AAI_ECB) {
+		iv_dma = ecb_ziv_dma;
+	} else {
+		iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
+		if (dma_mapping_error(jrdev, iv_dma)) {
+			dev_err(jrdev, "unable to map IV\n");
+			caam_unmap(jrdev, req->src, req->dst, src_nents,
+				   dst_nents, 0, 0, DMA_NONE, 0, 0);
+			kfree(edesc);
+			return ERR_PTR(-ENOMEM);
+		}
 	}
 
 	dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
@@ -3378,10 +3399,40 @@ static void caam_aead_exit(struct crypto_aead *tfm)
 
 static void __exit caam_algapi_exit(void)
 {
-
+	struct device_node *dev_node;
+	struct platform_device *pdev;
+	struct device *ctrldev;
 	struct caam_crypto_alg *t_alg, *n;
 	int i;
 
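+	/*
+	 * The zero IV was DMA mapped against the SEC controller device at
+	 * module init; look that device up again so the mapping can be
+	 * released.
+	 */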
+	if (!ecb_zero_iv)
+		goto skip_ecb_ziv;
+
+	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	if (!dev_node) {
+		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+		if (!dev_node)
+			goto skip_ecb_ziv;
+	}
+
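+	/* of_find_device_by_node() takes a reference on the device */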
+	pdev = of_find_device_by_node(dev_node);
+	of_node_put(dev_node);
+	if (!pdev)
+		goto skip_ecb_ziv;
+
+	ctrldev = &pdev->dev;
+
+	dma_unmap_single(ctrldev, ecb_ziv_dma, AES_BLOCK_SIZE, DMA_TO_DEVICE);
+	kfree(ecb_zero_iv);
+	put_device(ctrldev);
+
+skip_ecb_ziv:
 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
 		struct caam_aead_alg *t_alg = driver_aeads + i;
 
@@ -3494,6 +3545,21 @@ static int __init caam_algapi_init(void)
 	if (!priv)
 		return -ENODEV;
 
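+	/*
+	 * Allocate and map the all-zero IV once; AES-ECB requests that
+	 * arrive without req->info share this single mapping.
+	 */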
+	ecb_zero_iv = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);
+	if (!ecb_zero_iv)
+		return -ENOMEM;
+
+	ecb_ziv_dma = dma_map_single(ctrldev, ecb_zero_iv, AES_BLOCK_SIZE,
+				     DMA_TO_DEVICE);
+	if (dma_mapping_error(ctrldev, ecb_ziv_dma)) {
+		kfree(ecb_zero_iv);
+		ecb_zero_iv = NULL;
+		return -ENOMEM;
+	}
 
 	INIT_LIST_HEAD(&alg_list);