author     José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-11 10:41:07 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-13 08:17:18 +0300
commit     e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree       d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/drivers/gpu/drm/nouveau/nouveau_bo.c
parent     f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. Kernel sources are taken
from kernel.org, and the RT patch from the RT wiki download page. During the
rebase, the following patch collided: Force tick interrupt and get rid of
softirq magic (I70131fb85). The collision was resolved by dropping that patch,
since its logic was already present in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/drivers/gpu/drm/nouveau/nouveau_bo.c')
-rw-r--r--  kernel/drivers/gpu/drm/nouveau/nouveau_bo.c  92
1 file changed, 45 insertions(+), 47 deletions(-)
diff --git a/kernel/drivers/gpu/drm/nouveau/nouveau_bo.c b/kernel/drivers/gpu/drm/nouveau/nouveau_bo.c
index 6edcce165..78f520d05 100644
--- a/kernel/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/kernel/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -48,24 +48,19 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
 {
         struct nouveau_drm *drm = nouveau_drm(dev);
         int i = reg - drm->tile.reg;
-        struct nvkm_fb *pfb = nvxx_fb(&drm->device);
-        struct nvkm_fb_tile *tile = &pfb->tile.region[i];
-        struct nvkm_engine *engine;
+        struct nvkm_device *device = nvxx_device(&drm->device);
+        struct nvkm_fb *fb = device->fb;
+        struct nvkm_fb_tile *tile = &fb->tile.region[i];
 
         nouveau_fence_unref(&reg->fence);
 
         if (tile->pitch)
-                pfb->tile.fini(pfb, i, tile);
+                nvkm_fb_tile_fini(fb, i, tile);
 
         if (pitch)
-                pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);
-
-        pfb->tile.prog(pfb, i, tile);
+                nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);
 
-        if ((engine = nvkm_engine(pfb, NVDEV_ENGINE_GR)))
-                engine->tile_prog(engine, i);
-        if ((engine = nvkm_engine(pfb, NVDEV_ENGINE_MPEG)))
-                engine->tile_prog(engine, i);
+        nvkm_fb_tile_prog(fb, i, tile);
 }
 
 static struct nouveau_drm_tile *
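Note on the hunk above: the pfb->tile.{fini,init,prog} function-pointer hooks
become the nvkm_fb_tile_*() helpers, and the explicit GR/MPEG engine loop
disappears. A minimal sketch of the resulting update sequence, assuming
nvkm_fb_tile_prog() now notifies the engines internally (inferred from the
removed engine->tile_prog calls, not verified here):

        /* Sketch only, not part of the patch. */
        static void example_update_tile(struct nvkm_fb *fb, int i, u32 addr,
                                        u32 size, u32 pitch, u32 flags,
                                        struct nvkm_fb_tile *tile)
        {
                if (tile->pitch)
                        nvkm_fb_tile_fini(fb, i, tile);  /* drop old region */
                if (pitch)
                        nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);
                nvkm_fb_tile_prog(fb, i, tile);          /* program hardware */
        }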
@@ -105,18 +100,18 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
                    u32 size, u32 pitch, u32 flags)
 {
         struct nouveau_drm *drm = nouveau_drm(dev);
-        struct nvkm_fb *pfb = nvxx_fb(&drm->device);
+        struct nvkm_fb *fb = nvxx_fb(&drm->device);
         struct nouveau_drm_tile *tile, *found = NULL;
         int i;
 
-        for (i = 0; i < pfb->tile.regions; i++) {
+        for (i = 0; i < fb->tile.regions; i++) {
                 tile = nv10_bo_get_tile_region(dev, i);
 
                 if (pitch && !found) {
                         found = tile;
                         continue;
 
-                } else if (tile && pfb->tile.region[i].pitch) {
+                } else if (tile && fb->tile.region[i].pitch) {
                         /* Kill an unused tile region. */
                         nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
                 }
@@ -214,7 +209,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
         nvbo->tile_flags = tile_flags;
         nvbo->bo.bdev = &drm->ttm.bdev;
 
-        if (!nv_device_is_cpu_coherent(nvxx_device(&drm->device)))
+        if (!nvxx_device(&drm->device)->func->cpu_coherent)
                 nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
 
         nvbo->page_shift = 12;
@@ -471,8 +466,8 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
                 return;
 
         for (i = 0; i < ttm_dma->ttm.num_pages; i++)
-                dma_sync_single_for_device(nv_device_base(device),
-                        ttm_dma->dma_address[i], PAGE_SIZE, DMA_TO_DEVICE);
+                dma_sync_single_for_device(device->dev, ttm_dma->dma_address[i],
+                                           PAGE_SIZE, DMA_TO_DEVICE);
 }
 
 void
@@ -491,8 +486,8 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
                 return;
 
         for (i = 0; i < ttm_dma->ttm.num_pages; i++)
-                dma_sync_single_for_cpu(nv_device_base(device),
-                        ttm_dma->dma_address[i], PAGE_SIZE, DMA_FROM_DEVICE);
+                dma_sync_single_for_cpu(device->dev, ttm_dma->dma_address[i],
+                                        PAGE_SIZE, DMA_FROM_DEVICE);
 }
 
 int
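Both sync hunks above drop the nv_device_base() accessor in favour of the
embedded device->dev pointer; the streaming-DMA ownership handoff itself is
unchanged. A self-contained sketch of that pattern (helper name hypothetical):

        #include <linux/dma-mapping.h>

        /* Hand pages back to the device before it accesses them via DMA. */
        static void example_sync_for_device(struct device *dev,
                                            dma_addr_t *addrs,
                                            unsigned long num_pages)
        {
                unsigned long i;

                for (i = 0; i < num_pages; i++)
                        dma_sync_single_for_device(dev, addrs[i], PAGE_SIZE,
                                                   DMA_TO_DEVICE);
        }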
@@ -579,12 +574,11 @@ static struct ttm_tt *
 nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
                       uint32_t page_flags, struct page *dummy_read)
 {
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
         struct nouveau_drm *drm = nouveau_bdev(bdev);
-        struct drm_device *dev = drm->dev;
 
-        if (drm->agp.stat == ENABLED) {
-                return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
+        if (drm->agp.bridge) {
+                return ttm_agp_tt_create(bdev, drm->agp.bridge, size,
                                          page_flags, dummy_read);
         }
 #endif
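IS_ENABLED(CONFIG_AGP) is the standard kconfig test that replaces the driver's
private __OS_HAS_AGP macro: it evaluates to 1 when CONFIG_AGP is built-in or
modular and 0 otherwise, and works both in #if directives and in plain C
expressions. The runtime check likewise moves from the removed drm->agp.stat
enum to testing drm->agp.bridge for non-NULL. Illustrative use:

        #if IS_ENABLED(CONFIG_AGP)
        /* compiled only when CONFIG_AGP=y or CONFIG_AGP=m */
        if (drm->agp.bridge) {
                /* an AGP bridge was found at probe time */
        }
        #endif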
@@ -636,12 +630,12 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
                         man->func = &nouveau_gart_manager;
                 else
-                if (drm->agp.stat != ENABLED)
+                if (!drm->agp.bridge)
                         man->func = &nv04_gart_manager;
                 else
                         man->func = &ttm_bo_manager_func;
 
-                if (drm->agp.stat == ENABLED) {
+                if (drm->agp.bridge) {
                         man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                         man->available_caching = TTM_PL_FLAG_UNCACHED |
                                 TTM_PL_FLAG_WC;
@@ -1064,7 +1058,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 {
         struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
         struct nouveau_channel *chan = drm->ttm.chan;
-        struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
+        struct nouveau_cli *cli = (void *)chan->user.client;
         struct nouveau_fence *fence;
         int ret;
 
@@ -1104,7 +1098,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
         static const struct {
                 const char *name;
                 int engine;
-                u32 oclass;
+                s32 oclass;
                 int (*exec)(struct nouveau_channel *,
                             struct ttm_buffer_object *,
                             struct ttm_mem_reg *, struct ttm_mem_reg *);
@@ -1137,7 +1131,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
                 if (chan == NULL)
                         continue;
 
-                ret = nvif_object_init(chan->object, NULL,
+                ret = nvif_object_init(&chan->user,
                                        mthd->oclass | (mthd->engine << 16),
                                        mthd->oclass, NULL, 0,
                                        &drm->ttm.copy);
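Two interface changes meet in the hunk above: the method table's oclass field
is now a signed s32 (see the earlier hunk), and the object is created against
the channel's exposed nvif object, &chan->user, instead of the old chan->object
pointer with its extra NULL argument. A hedged sketch of the new call shape
(the class value is hypothetical):

        s32 oclass = 0x9039;    /* hypothetical copy-engine class */
        int ret = nvif_object_init(&chan->user,
                                   oclass | (engine << 16),     /* handle */
                                   oclass, NULL, 0, &drm->ttm.copy);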
@@ -1356,6 +1350,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
         struct nouveau_drm *drm = nouveau_bdev(bdev);
+        struct nvkm_device *device = nvxx_device(&drm->device);
         struct nvkm_mem *node = mem->mm_node;
         int ret;
 
@@ -1371,11 +1366,11 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
                 /* System memory */
                 return 0;
         case TTM_PL_TT:
-#if __OS_HAS_AGP
-                if (drm->agp.stat == ENABLED) {
+#if IS_ENABLED(CONFIG_AGP)
+                if (drm->agp.bridge) {
                         mem->bus.offset = mem->start << PAGE_SHIFT;
                         mem->bus.base = drm->agp.base;
-                        mem->bus.is_iomem = !drm->dev->agp->cant_use_aperture;
+                        mem->bus.is_iomem = !drm->agp.cma;
                 }
 #endif
                 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
@@ -1384,16 +1379,20 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
                 /* fallthrough, tiled memory */
         case TTM_PL_VRAM:
                 mem->bus.offset = mem->start << PAGE_SHIFT;
-                mem->bus.base = nv_device_resource_start(nvxx_device(&drm->device), 1);
+                mem->bus.base = device->func->resource_addr(device, 1);
                 mem->bus.is_iomem = true;
                 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                         struct nvkm_bar *bar = nvxx_bar(&drm->device);
+                        int page_shift = 12;
+                        if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
+                                page_shift = node->page_shift;
 
-                        ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
-                                        &node->bar_vma);
+                        ret = nvkm_bar_umap(bar, node->size << 12, page_shift,
+                                            &node->bar_vma);
                         if (ret)
                                 return ret;
 
+                        nvkm_vm_map(&node->bar_vma, node);
                         mem->bus.offset = node->bar_vma.offset;
                 }
                 break;
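The VRAM path above splits the old single bar->umap() hook into two explicit
steps: nvkm_bar_umap() reserves a virtual range in the BAR aperture
(node->size << 12 bytes, at a page shift that on Fermi and later follows the
node), and nvkm_vm_map() then backs that range with the memory node's pages.
Condensed sketch of the sequence, as inferred from the hunk:

        ret = nvkm_bar_umap(bar, node->size << 12, page_shift, &node->bar_vma);
        if (ret)
                return ret;
        nvkm_vm_map(&node->bar_vma, node);      /* map the node's pages */
        mem->bus.offset = node->bar_vma.offset; /* expose the BAR window */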
@@ -1406,14 +1405,13 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
-        struct nouveau_drm *drm = nouveau_bdev(bdev);
-        struct nvkm_bar *bar = nvxx_bar(&drm->device);
         struct nvkm_mem *node = mem->mm_node;
 
         if (!node->bar_vma.node)
                 return;
 
-        bar->unmap(bar, &node->bar_vma);
+        nvkm_vm_unmap(&node->bar_vma);
+        nvkm_vm_put(&node->bar_vma);
 }
 
 static int
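The teardown now mirrors that two-step mapping, with no nvkm_bar pointer
needed: nvkm_vm_unmap() reverses nvkm_vm_map(), and nvkm_vm_put() releases the
range obtained from nvkm_bar_umap(). The assumed pairing over the mapping's
lifetime:

        /* map: reserve a BAR range, then populate it */
        ret = nvkm_bar_umap(bar, size, page_shift, &vma);
        nvkm_vm_map(&vma, node);
        /* ... BAR window in use ... */
        /* unmap: empty the range, then release it */
        nvkm_vm_unmap(&vma);
        nvkm_vm_put(&vma);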
@@ -1421,8 +1419,8 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
         struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
         struct nouveau_bo *nvbo = nouveau_bo(bo);
-        struct nvif_device *device = &drm->device;
-        u32 mappable = nv_device_resource_len(nvxx_device(device), 1) >> PAGE_SHIFT;
+        struct nvkm_device *device = nvxx_device(&drm->device);
+        u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
         int i, ret;
 
         /* as long as the bo isn't in vram, and isn't tiled, we've got
@@ -1488,18 +1486,18 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
         drm = nouveau_bdev(ttm->bdev);
         device = nvxx_device(&drm->device);
         dev = drm->dev;
-        pdev = nv_device_base(device);
+        pdev = device->dev;
 
         /*
          * Objects matching this condition have been marked as force_coherent,
          * so use the DMA API for them.
          */
-        if (!nv_device_is_cpu_coherent(device) &&
+        if (!nvxx_device(&drm->device)->func->cpu_coherent &&
             ttm->caching_state == tt_uncached)
                 return ttm_dma_populate(ttm_dma, dev->dev);
 
-#if __OS_HAS_AGP
-        if (drm->agp.stat == ENABLED) {
+#if IS_ENABLED(CONFIG_AGP)
+        if (drm->agp.bridge) {
                 return ttm_agp_tt_populate(ttm);
         }
 #endif
@@ -1553,20 +1551,20 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
         drm = nouveau_bdev(ttm->bdev);
         device = nvxx_device(&drm->device);
         dev = drm->dev;
-        pdev = nv_device_base(device);
+        pdev = device->dev;
 
         /*
          * Objects matching this condition have been marked as force_coherent,
          * so use the DMA API for them.
          */
-        if (!nv_device_is_cpu_coherent(device) &&
+        if (!nvxx_device(&drm->device)->func->cpu_coherent &&
             ttm->caching_state == tt_uncached) {
                 ttm_dma_unpopulate(ttm_dma, dev->dev);
                 return;
         }
 
-#if __OS_HAS_AGP
-        if (drm->agp.stat == ENABLED) {
+#if IS_ENABLED(CONFIG_AGP)
+        if (drm->agp.bridge) {
                 ttm_agp_tt_unpopulate(ttm);
                 return;
         }