@@ -592,11 +592,6 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 	dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
 	int i;
 
-	if (blk_integrity_rq(req)) {
-		dma_unmap_page(dev->dev, iod->meta_dma,
-			       rq_integrity_vec(req)->bv_len, dma_dir);
-	}
-
 	if (iod->nents) {
 		/* P2PDMA requests do not need to be unmapped */
 		if (!is_pci_p2pdma_page(sg_page(iod->sg)))
@@ -858,24 +853,23 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
 	else
 		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
-
+out:
 	if (ret != BLK_STS_OK)
-		goto out;
-
-	ret = BLK_STS_IOERR;
-	if (blk_integrity_rq(req)) {
-		iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
-					     dma_dir, 0);
-		if (dma_mapping_error(dev->dev, iod->meta_dma))
-			goto out;
-		cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
-	}
+		nvme_unmap_data(dev, req);
+	return ret;
+}
 
-	return BLK_STS_OK;
+static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
+		struct nvme_command *cmnd)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
-out:
-	nvme_unmap_data(dev, req);
-	return ret;
+	iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
+			rq_dma_dir(req), 0);
+	if (dma_mapping_error(dev->dev, iod->meta_dma))
+		return BLK_STS_IOERR;
+	cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
+	return 0;
 }
 
 /*
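
For readability, this is how the new nvme_map_metadata() helper reads once the hunk above is applied. It is reconstructed from the added lines only, so the whitespace and the added comment are approximate:

static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	/* Map the request's integrity (metadata) bvec and store it in the command. */
	iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
			rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->meta_dma))
		return BLK_STS_IOERR;
	cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
	return 0;
}
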
@@ -913,9 +907,17 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 			goto out_free_cmd;
 	}
 
+	if (blk_integrity_rq(req)) {
+		ret = nvme_map_metadata(dev, req, &cmnd);
+		if (ret)
+			goto out_unmap_data;
+	}
+
 	blk_mq_start_request(req);
 	nvme_submit_cmd(nvmeq, &cmnd, bd->last);
 	return BLK_STS_OK;
+out_unmap_data:
+	nvme_unmap_data(dev, req);
 out_free_cmd:
 	nvme_cleanup_cmd(req);
 	return ret;
@@ -924,10 +926,14 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 static void nvme_pci_complete_rq(struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	struct nvme_dev *dev = iod->nvmeq->dev;
 
 	nvme_cleanup_cmd(req);
+	if (blk_integrity_rq(req))
+		dma_unmap_page(dev->dev, iod->meta_dma,
+			       rq_integrity_vec(req)->bv_len, rq_data_dir(req));
 	if (blk_rq_nr_phys_segments(req))
-		nvme_unmap_data(iod->nvmeq->dev, req);
+		nvme_unmap_data(dev, req);
 	nvme_complete_rq(req);
 }
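
Taken together, the mapping and unmapping of metadata now pair across the submission and completion paths. A condensed sketch assembled from the hunks above (not the complete functions; unrelated lines are elided with ...):

static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	...
	if (blk_integrity_rq(req)) {
		/* Map metadata only after nvme_map_data() succeeded. */
		ret = nvme_map_metadata(dev, req, &cmnd);
		if (ret)
			goto out_unmap_data;	/* undoes the data mapping */
	}
	...
}

static void nvme_pci_complete_rq(struct request *req)
{
	...
	/* Metadata is unmapped here now, no longer in nvme_unmap_data(). */
	if (blk_integrity_rq(req))
		dma_unmap_page(dev->dev, iod->meta_dma,
			       rq_integrity_vec(req)->bv_len, rq_data_dir(req));
	...
}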