diff --git a/block/bio.c b/block/bio.c
index 006bcc52a77ebef4c0e792c7eb7c19564ce9482f..94d697217887aa1b9e8b214c7606a6f1e7bf8b42 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -538,6 +538,16 @@ void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
 }
 EXPORT_SYMBOL(zero_fill_bio_iter);
 
+/**
+ * bio_truncate - truncate the bio to a new, smaller size of @new_size
+ * @bio:	the bio to be truncated
+ * @new_size:	new size of the bio, in bytes
+ *
+ * Description:
+ *   Truncate the bio to the new size @new_size. If bio_op(bio) is
+ *   REQ_OP_READ, zero the truncated part of the bio. This helper should
+ *   only be used for handling corner cases, such as bio EOD.
+ */
 void bio_truncate(struct bio *bio, unsigned new_size)
 {
 	struct bio_vec bv;
@@ -548,7 +558,7 @@ void bio_truncate(struct bio *bio, unsigned new_size)
 	if (new_size >= bio->bi_iter.bi_size)
 		return;
 
-	if (bio_data_dir(bio) != READ)
+	if (bio_op(bio) != REQ_OP_READ)
 		goto exit;
 
 	bio_for_each_segment(bv, bio, iter) {
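
For reference, a minimal sketch of the "bio EOD" use case named in the kernel-doc above: clamping a bio so it does not run past the last addressable sector before submission. The helper name and the capacity argument are illustrative assumptions, not part of this patch; only the bio_truncate() call itself comes from the change above.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Illustrative sketch only: clip a bio that extends past @capacity sectors. */
static void example_clamp_bio_eod(struct bio *bio, sector_t capacity)
{
	sector_t max_sectors;

	/* The whole bio lies beyond EOD: leave it for the I/O layer to fail. */
	if (bio->bi_iter.bi_sector >= capacity)
		return;

	max_sectors = capacity - bio->bi_iter.bi_sector;
	if ((bio->bi_iter.bi_size >> SECTOR_SHIFT) <= max_sectors)
		return;

	/* bio_op() must already be set: READ bios get the clipped part zeroed. */
	bio_truncate(bio, max_sectors << SECTOR_SHIFT);
}
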
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 667f18f465be1c6d35e52685f29095665efba00e..5dc32b72e7faab7875640106514ab88d4976ebf9 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -222,6 +222,8 @@ static blk_status_t nvme_error_status(u16 status)
 	case NVME_SC_CAP_EXCEEDED:
 		return BLK_STS_NOSPC;
 	case NVME_SC_LBA_RANGE:
+	case NVME_SC_CMD_INTERRUPTED:
+	case NVME_SC_NS_NOT_READY:
 		return BLK_STS_TARGET;
 	case NVME_SC_BAD_ATTRIBUTES:
 	case NVME_SC_ONCS_NOT_SUPPORTED:
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 56c21b5011852b15eaf899e625d5a15b251b14b3..72a7e41f3018acdbbb2d1203e37cfec1c3b48583 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -24,6 +24,16 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd)
 	return len;
 }
 
+/*
+ * Data transfer length expected for a Get Features command: only the
+ * Host Identifier feature returns data in addition to the completion
+ * queue entry.
+ */
+static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
+{
+	switch (cdw10 & 0xff) {
+	case NVME_FEAT_HOST_ID:
+		return sizeof(req->sq->ctrl->hostid);
+	default:
+		return 0;
+	}
+}
+
 u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
 {
 	return le64_to_cpu(cmd->get_log_page.lpo);
@@ -778,7 +788,7 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
 	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
 	u16 status = 0;
 
-	if (!nvmet_check_data_len(req, 0))
+	if (!nvmet_check_data_len(req, nvmet_feat_data_len(req, cdw10)))
 		return;
 
 	switch (cdw10 & 0xff) {
diff --git a/fs/buffer.c b/fs/buffer.c
index e94a6619464c8132218c470ffa45f38ca38e1e06..18a87ec8a465bbf1dccee2cd59e29a3c180fcb5c 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3031,7 +3031,7 @@ static void end_bio_bh_io_sync(struct bio *bio)
  * errors, this only handles the "we need to be able to
  * do IO at the final sector" case.
  */
-void guard_bio_eod(int op, struct bio *bio)
+void guard_bio_eod(struct bio *bio)
 {
 	sector_t maxsector;
 	struct hd_struct *part;
@@ -3095,15 +3095,15 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
 	bio->bi_end_io = end_bio_bh_io_sync;
 	bio->bi_private = bh;
 
-	/* Take care of bh's that straddle the end of the device */
-	guard_bio_eod(op, bio);
-
 	if (buffer_meta(bh))
 		op_flags |= REQ_META;
 	if (buffer_prio(bh))
 		op_flags |= REQ_PRIO;
 	bio_set_op_attrs(bio, op, op_flags);
 
+	/* Take care of bh's that straddle the end of the device */
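+	/* Must come after bio_set_op_attrs(): bio_truncate() checks bio_op() */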
+	guard_bio_eod(bio);
+
 	if (wbc) {
 		wbc_init_bio(wbc, bio);
 		wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
diff --git a/fs/internal.h b/fs/internal.h
index 4a7da1df573da9070fa352c1ba5d538b61aaecfe..e3fa69544b66c0bd18e99a023ccf0f32f7b8e009 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -38,7 +38,7 @@ static inline int __sync_blockdev(struct block_device *bdev, int wait)
 /*
  * buffer.c
  */
-extern void guard_bio_eod(int rw, struct bio *bio);
+extern void guard_bio_eod(struct bio *bio);
 extern int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
 		get_block_t *get_block, struct iomap *iomap);
 
diff --git a/fs/mpage.c b/fs/mpage.c
index a63620cdb73a732d1a9f987c7889923c05830535..ccba3c4c44797b54b859acc2378d5f317e6c29be 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -62,7 +62,7 @@ static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)
 {
 	bio->bi_end_io = mpage_end_io;
 	bio_set_op_attrs(bio, op, op_flags);
-	guard_bio_eod(op, bio);
+	guard_bio_eod(bio);
 	submit_bio(bio);
 	return NULL;
 }
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index 679a42253170bf6325cbdfebadfb283986878d2e..a81c13ac19728c233de45d6ef9fbfae4e4a94e06 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -153,26 +153,4 @@ static inline void bvec_advance(const struct bio_vec *bvec,
 	}
 }
 
-/*
- * Get the last single-page segment from the multi-page bvec and store it
- * in @seg
- */
-static inline void mp_bvec_last_segment(const struct bio_vec *bvec,
-					struct bio_vec *seg)
-{
-	unsigned total = bvec->bv_offset + bvec->bv_len;
-	unsigned last_page = (total - 1) / PAGE_SIZE;
-
-	seg->bv_page = bvec->bv_page + last_page;
-
-	/* the whole segment is inside the last page */
-	if (bvec->bv_offset >= last_page * PAGE_SIZE) {
-		seg->bv_offset = bvec->bv_offset % PAGE_SIZE;
-		seg->bv_len = bvec->bv_len;
-	} else {
-		seg->bv_offset = 0;
-		seg->bv_len = total - last_page * PAGE_SIZE;
-	}
-}
-
 #endif /* __LINUX_BVEC_ITER_H */