diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 83e4f959ee47c6b9b18a77d9f50995b7b4fc5ab0..85501238b4258316272f9448ad8829b18a1111b2 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -96,7 +96,8 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
 	struct remap_data *info = data;
 	struct page *page = info->pages[info->index++];
 	unsigned long pfn = page_to_pfn(page);
-	pte_t pte = pfn_pte(pfn, info->prot);
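+	/* Mark the PTE special so vm_normal_page() skips this foreign page. */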
+	pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
 
 	if (map_foreign_page(pfn, info->fgmfn, info->domid))
 		return -EFAULT;
@@ -224,10 +225,11 @@ static int __init xen_guest_init(void)
 	}
 	if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
 		return 0;
-	xen_hvm_resume_frames = res.start >> PAGE_SHIFT;
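+	/* Physical address: shift by PAGE_SHIFT to get the frame number. */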
+	xen_hvm_resume_frames = res.start;
 	xen_events_irq = irq_of_parse_and_map(node, 0);
 	pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
-			version, xen_events_irq, xen_hvm_resume_frames);
+			version, xen_events_irq, (xen_hvm_resume_frames >> PAGE_SHIFT));
 	xen_domain_type = XEN_HVM_DOMAIN;
 
 	xen_setup_features();
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
index 2820f1a6eebe0252d280e5d56e0a6bc60ac14182..dde3fc9c49f015c80b43f9ca21f1d996b9d2a910 100644
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -23,25 +23,25 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     struct dma_attrs *attrs)
 {
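+	/*
+	 * The default arm64 dma_ops are swiotlb-backed; calling them here
+	 * would run the swiotlb code a second time on top of swiotlb-xen.
+	 */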
-	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
 }
 
 static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
 }
 
 static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-	__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
 }
 
 static inline void xen_dma_sync_single_for_device(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-	__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
 }
 #endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 55ea73f7c70b52e49ca3b4f2b40f5baa064928e9..4c02e2b9410377d9e6b77fe1bc2b50d929774551 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -350,17 +350,20 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 
 		pfn = page_to_pfn(page);
 
-		set_phys_to_machine(pfn, frame_list[i]);
-
 #ifdef CONFIG_XEN_HAVE_PVMMU
-		/* Link back into the page tables if not highmem. */
-		if (xen_pv_domain() && !PageHighMem(page)) {
-			int ret;
-			ret = HYPERVISOR_update_va_mapping(
-				(unsigned long)__va(pfn << PAGE_SHIFT),
-				mfn_pte(frame_list[i], PAGE_KERNEL),
-				0);
-			BUG_ON(ret);
+		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
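+			/* PV only: keep the p2m and direct map in sync. */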
+			set_phys_to_machine(pfn, frame_list[i]);
+
+			/* Link back into the page tables if not highmem. */
+			if (!PageHighMem(page)) {
+				int ret;
+				ret = HYPERVISOR_update_va_mapping(
+						(unsigned long)__va(pfn << PAGE_SHIFT),
+						mfn_pte(frame_list[i], PAGE_KERNEL),
+						0);
+				BUG_ON(ret);
+			}
 		}
 #endif
 
@@ -378,7 +381,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 	enum bp_state state = BP_DONE;
 	unsigned long  pfn, i;
 	struct page   *page;
-	struct page   *scratch_page;
 	int ret;
 	struct xen_memory_reservation reservation = {
 		.address_bits = 0,
@@ -411,27 +413,30 @@
 
 		scrub_page(page);
 
+#ifdef CONFIG_XEN_HAVE_PVMMU
 		/*
 		 * Ballooned out frames are effectively replaced with
 		 * a scratch frame.  Ensure direct mappings and the
 		 * p2m are consistent.
 		 */
-		scratch_page = get_balloon_scratch_page();
-#ifdef CONFIG_XEN_HAVE_PVMMU
-		if (xen_pv_domain() && !PageHighMem(page)) {
-			ret = HYPERVISOR_update_va_mapping(
-				(unsigned long)__va(pfn << PAGE_SHIFT),
-				pfn_pte(page_to_pfn(scratch_page),
-					PAGE_KERNEL_RO), 0);
-			BUG_ON(ret);
-		}
-#endif
 		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
 			unsigned long p;
+			struct page   *scratch_page = get_balloon_scratch_page();
+
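+			/* Point the direct mapping at the RO scratch frame. */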
+			if (!PageHighMem(page)) {
+				ret = HYPERVISOR_update_va_mapping(
+						(unsigned long)__va(pfn << PAGE_SHIFT),
+						pfn_pte(page_to_pfn(scratch_page),
+							PAGE_KERNEL_RO), 0);
+				BUG_ON(ret);
+			}
 			p = page_to_pfn(scratch_page);
 			__set_phys_to_machine(pfn, pfn_to_mfn(p));
+
+			put_balloon_scratch_page();
 		}
-		put_balloon_scratch_page();
+#endif
 
 		balloon_append(pfn_to_page(pfn));
 	}
@@ -627,15 +632,17 @@ static int __init balloon_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
-	for_each_online_cpu(cpu)
-	{
-		per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
-		if (per_cpu(balloon_scratch_page, cpu) == NULL) {
-			pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
-			return -ENOMEM;
+	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
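+		/* Scratch pages back ballooned-out PTEs; PV guests only. */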
+		for_each_online_cpu(cpu) {
+			per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
+			if (per_cpu(balloon_scratch_page, cpu) == NULL) {
+				pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
+				return -ENOMEM;
+			}
 		}
+		register_cpu_notifier(&balloon_cpu_notifier);
 	}
-	register_cpu_notifier(&balloon_cpu_notifier);
 
 	pr_info("Initialising balloon driver\n");
 
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 028387192b608b04a9fd9483a9916404cb861f76..aa846a48f4009eaac8271a8a653716c27aa1dc01 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1176,7 +1176,8 @@ static int gnttab_setup(void)
 		gnttab_shared.addr = xen_remap(xen_hvm_resume_frames,
 						PAGE_SIZE * max_nr_gframes);
 		if (gnttab_shared.addr == NULL) {
-			pr_warn("Failed to ioremap gnttab share frames!\n");
+			pr_warn("Failed to ioremap gnttab share frames (addr=0x%08lx)!\n",
+					xen_hvm_resume_frames);
 			return -ENOMEM;
 		}
 	}
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 8e74590fa1bb5d9149e17550c3f4ec7b74142983..569a13b9e856de5c3900050d583844f401243e96 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -533,12 +533,18 @@ static void privcmd_close(struct vm_area_struct *vma)
 {
 	struct page **pages = vma->vm_private_data;
 	int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	int rc;
 
 	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
 		return;
 
-	xen_unmap_domain_mfn_range(vma, numpgs, pages);
-	free_xenballooned_pages(numpgs, pages);
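+	/* If the unmap fails the pages are still foreign-mapped; leak them. */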
+	rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
+	if (rc == 0)
+		free_xenballooned_pages(numpgs, pages);
+	else
+		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
+			numpgs, rc);
 	kfree(pages);
 }
 
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index 65e12099ef89d9f3b5706858d956d8aabe427348..ae665ac59c36b6812a0470d5a7e0124c3e13a69f 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -146,7 +146,8 @@ struct blkif_request_segment_aligned {
 struct blkif_request_rw {
 	uint8_t        nr_segments;  /* number of segments                   */
 	blkif_vdev_t   handle;       /* only for read/write requests         */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
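+	/* Every ABI except x86_32 pads here to keep id 8-byte aligned. */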
 	uint32_t       _pad1;	     /* offsetof(blkif_request,u.rw.id) == 8 */
 #endif
 	uint64_t       id;           /* private guest value, echoed in resp  */
@@ -163,7 +164,7 @@
 	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero.        */
 #define BLKIF_DISCARD_SECURE (1<<0)  /* ignored if discard-secure=0          */
 	blkif_vdev_t   _pad1;        /* only for read/write requests         */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
 	uint32_t       _pad2;        /* offsetof(blkif_req..,u.discard.id)==8*/
 #endif
 	uint64_t       id;           /* private guest value, echoed in resp  */
@@ -175,7 +176,7 @@
 struct blkif_request_other {
 	uint8_t      _pad1;
 	blkif_vdev_t _pad2;        /* only for read/write requests         */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
 	uint32_t     _pad3;        /* offsetof(blkif_req..,u.other.id)==8*/
 #endif
 	uint64_t     id;           /* private guest value, echoed in resp  */
@@ -184,7 +185,7 @@
 struct blkif_request_indirect {
 	uint8_t        indirect_op;
 	uint16_t       nr_segments;
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
 	uint32_t       _pad1;        /* offsetof(blkif_...,u.indirect.id) == 8 */
 #endif
 	uint64_t       id;
@@ -192,7 +193,7 @@
 	blkif_vdev_t   handle;
 	uint16_t       _pad2;
 	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
 	uint32_t      _pad3;         /* make it 64 byte aligned */
 #else
 	uint64_t      _pad3;         /* make it 64 byte aligned */