Diffstat (limited to 'kernel/arch/s390/pci/pci_dma.c')
-rw-r--r--  kernel/arch/s390/pci/pci_dma.c | 121
1 file changed, 76 insertions, 45 deletions
diff --git a/kernel/arch/s390/pci/pci_dma.c b/kernel/arch/s390/pci/pci_dma.c
index 6fd8d5836..d348f2c09 100644
--- a/kernel/arch/s390/pci/pci_dma.c
+++ b/kernel/arch/s390/pci/pci_dma.c
@@ -24,7 +24,7 @@ static int zpci_refresh_global(struct zpci_dev *zdev)
                                   zdev->iommu_pages * PAGE_SIZE);
 }
 
-static unsigned long *dma_alloc_cpu_table(void)
+unsigned long *dma_alloc_cpu_table(void)
 {
         unsigned long *table, *entry;
 
@@ -33,7 +33,7 @@ static unsigned long *dma_alloc_cpu_table(void)
                 return NULL;
 
         for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
-                *entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED;
+                *entry = ZPCI_TABLE_INVALID;
         return table;
 }
 
@@ -51,7 +51,7 @@ static unsigned long *dma_alloc_page_table(void)
                 return NULL;
 
         for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
-                *entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED;
+                *entry = ZPCI_PTE_INVALID;
         return table;
 }
 
@@ -95,7 +95,7 @@ static unsigned long *dma_get_page_table_origin(unsigned long *entry)
         return pto;
 }
 
-static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
+unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
 {
         unsigned long *sto, *pto;
         unsigned int rtx, sx, px;
@@ -114,20 +114,10 @@ static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
         return &pto[px];
 }
 
-static void dma_update_cpu_trans(struct zpci_dev *zdev, void *page_addr,
-                                 dma_addr_t dma_addr, int flags)
+void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
 {
-        unsigned long *entry;
-
-        entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
-        if (!entry) {
-                WARN_ON_ONCE(1);
-                return;
-        }
-
         if (flags & ZPCI_PTE_INVALID) {
                 invalidate_pt_entry(entry);
-                return;
         } else {
                 set_pt_pfaa(entry, page_addr);
                 validate_pt_entry(entry);
@@ -146,17 +136,25 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
         u8 *page_addr = (u8 *) (pa & PAGE_MASK);
         dma_addr_t start_dma_addr = dma_addr;
         unsigned long irq_flags;
+        unsigned long *entry;
         int i, rc = 0;
 
         if (!nr_pages)
                 return -EINVAL;
 
         spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
-        if (!zdev->dma_table)
+        if (!zdev->dma_table) {
+                rc = -EINVAL;
                 goto no_refresh;
+        }
 
         for (i = 0; i < nr_pages; i++) {
-                dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
+                entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
+                if (!entry) {
+                        rc = -ENOMEM;
+                        goto undo_cpu_trans;
+                }
+                dma_update_cpu_trans(entry, page_addr, flags);
                 page_addr += PAGE_SIZE;
                 dma_addr += PAGE_SIZE;
         }
@@ -175,13 +173,25 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
         rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
                                 nr_pages * PAGE_SIZE);
 
+undo_cpu_trans:
+        if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
+                flags = ZPCI_PTE_INVALID;
+                while (i-- > 0) {
+                        page_addr -= PAGE_SIZE;
+                        dma_addr -= PAGE_SIZE;
+                        entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
+                        if (!entry)
+                                break;
+                        dma_update_cpu_trans(entry, page_addr, flags);
+                }
+        }
 no_refresh:
         spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
         return rc;
 }
 
-static void dma_free_seg_table(unsigned long entry)
+void dma_free_seg_table(unsigned long entry)
 {
         unsigned long *sto = get_rt_sto(entry);
         int sx;
 
@@ -193,21 +203,18 @@ static void dma_free_seg_table(unsigned long entry)
         dma_free_cpu_table(sto);
 }
 
-static void dma_cleanup_tables(struct zpci_dev *zdev)
+void dma_cleanup_tables(unsigned long *table)
 {
-        unsigned long *table;
         int rtx;
 
-        if (!zdev || !zdev->dma_table)
+        if (!table)
                 return;
 
-        table = zdev->dma_table;
         for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
                 if (reg_entry_isvalid(table[rtx]))
                         dma_free_seg_table(table[rtx]);
 
         dma_free_cpu_table(table);
-        zdev->dma_table = NULL;
 }
 
 static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev,
@@ -262,53 +269,60 @@ out:
         spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
 }
 
-int dma_set_mask(struct device *dev, u64 mask)
+static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
 {
-        if (!dev->dma_mask || !dma_supported(dev, mask))
-                return -EIO;
+        struct {
+                unsigned long rc;
+                unsigned long addr;
+        } __packed data = {rc, addr};
 
-        *dev->dma_mask = mask;
-        return 0;
+        zpci_err_hex(&data, sizeof(data));
 }
-EXPORT_SYMBOL_GPL(dma_set_mask);
 
 static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
                                      unsigned long offset, size_t size,
                                      enum dma_data_direction direction,
                                      struct dma_attrs *attrs)
 {
-        struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+        struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
         unsigned long nr_pages, iommu_page_index;
         unsigned long pa = page_to_phys(page) + offset;
         int flags = ZPCI_PTE_VALID;
         dma_addr_t dma_addr;
+        int ret;
 
         /* This rounds up number of pages based on size and offset */
         nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
         iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
-        if (iommu_page_index == -1)
+        if (iommu_page_index == -1) {
+                ret = -ENOSPC;
                 goto out_err;
+        }
 
         /* Use rounded up size */
         size = nr_pages * PAGE_SIZE;
 
         dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
-        if (dma_addr + size > zdev->end_dma)
+        if (dma_addr + size > zdev->end_dma) {
+                ret = -ERANGE;
                 goto out_free;
+        }
 
         if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
                 flags |= ZPCI_TABLE_PROTECTED;
 
-        if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
-                atomic64_add(nr_pages, &zdev->mapped_pages);
-                return dma_addr + (offset & ~PAGE_MASK);
-        }
+        ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
+        if (ret)
+                goto out_free;
+
+        atomic64_add(nr_pages, &zdev->mapped_pages);
+        return dma_addr + (offset & ~PAGE_MASK);
 
 out_free:
         dma_free_iommu(zdev, iommu_page_index, nr_pages);
 out_err:
         zpci_err("map error:\n");
-        zpci_err_hex(&pa, sizeof(pa));
+        zpci_err_dma(ret, pa);
         return DMA_ERROR_CODE;
 }
 
@@ -316,16 +330,18 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
                                  size_t size, enum dma_data_direction direction,
                                  struct dma_attrs *attrs)
 {
-        struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+        struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
         unsigned long iommu_page_index;
-        int npages;
+        int npages, ret;
 
         npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
         dma_addr = dma_addr & PAGE_MASK;
-        if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
-                             ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID)) {
+        ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
+                               ZPCI_PTE_INVALID);
+        if (ret) {
                 zpci_err("unmap error:\n");
-                zpci_err_hex(&dma_addr, sizeof(dma_addr));
+                zpci_err_dma(ret, dma_addr);
+                return;
         }
 
         atomic64_add(npages, &zdev->unmapped_pages);
@@ -337,7 +353,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
                             dma_addr_t *dma_handle, gfp_t flag,
                             struct dma_attrs *attrs)
 {
-        struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+        struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
         struct page *page;
         unsigned long pa;
         dma_addr_t map;
@@ -367,7 +383,7 @@ static void s390_dma_free(struct device *dev, size_t size,
                           void *pa, dma_addr_t dma_handle,
                           struct dma_attrs *attrs)
 {
-        struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+        struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
 
         size = PAGE_ALIGN(size);
         atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
@@ -426,6 +442,13 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
 {
         int rc;
 
+        /*
+         * At this point, if the device is part of an IOMMU domain, this would
+         * be a strong hint towards a bug in the IOMMU API (common) code and/or
+         * simultaneous access via IOMMU and DMA API. So let's issue a warning.
+         */
+        WARN_ON(zdev->s390_domain);
+
         spin_lock_init(&zdev->iommu_bitmap_lock);
         spin_lock_init(&zdev->dma_table_lock);
 
@@ -460,8 +483,16 @@ out_clean:
 
 void zpci_dma_exit_device(struct zpci_dev *zdev)
 {
+        /*
+         * At this point, if the device is part of an IOMMU domain, this would
+         * be a strong hint towards a bug in the IOMMU API (common) code and/or
+         * simultaneous access via IOMMU and DMA API. So let's issue a warning.
+         */
+        WARN_ON(zdev->s390_domain);
+
         zpci_unregister_ioat(zdev, 0);
-        dma_cleanup_tables(zdev);
+        dma_cleanup_tables(zdev->dma_table);
+        zdev->dma_table = NULL;
         vfree(zdev->iommu_bitmap);
         zdev->iommu_bitmap = NULL;
         zdev->next_bit = 0;
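
The table helpers touched above (dma_alloc_cpu_table(), dma_walk_cpu_trans(), dma_update_cpu_trans(), dma_free_seg_table(), dma_cleanup_tables()) lose their static qualifier so they can be called from outside pci_dma.c. Below is a minimal caller sketch, not part of the patch: it assumes the prototypes and the ZPCI_* flag definitions are visible via asm/pci_dma.h, the example_* function names are hypothetical, and locking plus the zpci_refresh_trans() TLB refresh done by the DMA API path are omitted.

/*
 * Hypothetical caller sketch: map and unmap a single page at a given
 * IOVA using the helpers made non-static in the diff above.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/pci_dma.h>

static int example_map_one_page(unsigned long *dma_table, void *page_addr,
                                dma_addr_t iova, bool writable)
{
        unsigned long *entry;
        int flags = ZPCI_PTE_VALID;

        if (!writable)
                flags |= ZPCI_TABLE_PROTECTED;

        /* Walk the region/segment tables, allocating missing levels. */
        entry = dma_walk_cpu_trans(dma_table, iova);
        if (!entry)
                return -ENOMEM;

        /* Set the page frame address and mark the entry valid. */
        dma_update_cpu_trans(entry, page_addr, flags);
        return 0;
}

static void example_unmap_one_page(unsigned long *dma_table, dma_addr_t iova)
{
        unsigned long *entry = dma_walk_cpu_trans(dma_table, iova);

        /* Invalidate the entry; page_addr is ignored for this flag. */
        if (entry)
                dma_update_cpu_trans(entry, NULL, ZPCI_PTE_INVALID);
}

This mirrors what the rewritten dma_update_trans() does per page, including returning -ENOMEM when a missing table level cannot be allocated.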