author    | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-11 10:41:07 +0300
committer | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-13 08:17:18 +0300
commit    | e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree      | d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
parent    | f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. Kernel sources
are taken from kernel.org, and the RT patch from the RT wiki download page.
During the rebase, the following patch collided:
Force tick interrupt and get rid of softirq magic (I70131fb85).
The conflicting changes were dropped because their logic was already
present in the source.
Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c')
-rw-r--r-- | kernel/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c | 1037
1 file changed, 329 insertions(+), 708 deletions(-)
diff --git a/kernel/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/kernel/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c index e10f96441..98970a0b7 100644 --- a/kernel/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c +++ b/kernel/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c @@ -22,486 +22,121 @@ * Authors: Ben Skeggs */ #include "gk104.h" +#include "changk104.h" #include <core/client.h> -#include <core/engctx.h> #include <core/enum.h> -#include <core/handle.h> +#include <core/gpuobj.h> #include <subdev/bar.h> -#include <subdev/fb.h> -#include <subdev/mmu.h> -#include <subdev/timer.h> +#include <engine/sw.h> #include <nvif/class.h> -#include <nvif/unpack.h> - -#define _(a,b) { (a), ((1ULL << (a)) | (b)) } -static const struct { - u64 subdev; - u64 mask; -} fifo_engine[] = { - _(NVDEV_ENGINE_GR , (1ULL << NVDEV_ENGINE_SW) | - (1ULL << NVDEV_ENGINE_CE2)), - _(NVDEV_ENGINE_MSPDEC , 0), - _(NVDEV_ENGINE_MSPPP , 0), - _(NVDEV_ENGINE_MSVLD , 0), - _(NVDEV_ENGINE_CE0 , 0), - _(NVDEV_ENGINE_CE1 , 0), - _(NVDEV_ENGINE_MSENC , 0), -}; -#undef _ -#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine) - -struct gk104_fifo_engn { - struct nvkm_gpuobj *runlist[2]; - int cur_runlist; - wait_queue_head_t wait; -}; - -struct gk104_fifo_priv { - struct nvkm_fifo base; - - struct work_struct fault; - u64 mask; - - struct gk104_fifo_engn engine[FIFO_ENGINE_NR]; - struct { - struct nvkm_gpuobj *mem; - struct nvkm_vma bar; - } user; - int spoon_nr; -}; - -struct gk104_fifo_base { - struct nvkm_fifo_base base; - struct nvkm_gpuobj *pgd; - struct nvkm_vm *vm; -}; - -struct gk104_fifo_chan { - struct nvkm_fifo_chan base; - u32 engine; - enum { - STOPPED, - RUNNING, - KILLED - } state; -}; - -/******************************************************************************* - * FIFO channel objects - ******************************************************************************/ - -static void -gk104_fifo_runlist_update(struct gk104_fifo_priv *priv, u32 engine) -{ - struct nvkm_bar *bar = nvkm_bar(priv); - struct gk104_fifo_engn *engn = &priv->engine[engine]; - struct nvkm_gpuobj *cur; - int i, p; - - mutex_lock(&nv_subdev(priv)->mutex); - cur = engn->runlist[engn->cur_runlist]; - engn->cur_runlist = !engn->cur_runlist; - - for (i = 0, p = 0; i < priv->base.max; i++) { - struct gk104_fifo_chan *chan = (void *)priv->base.channel[i]; - if (chan && chan->state == RUNNING && chan->engine == engine) { - nv_wo32(cur, p + 0, i); - nv_wo32(cur, p + 4, 0x00000000); - p += 8; - } - } - bar->flush(bar); - - nv_wr32(priv, 0x002270, cur->addr >> 12); - nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3)); - - if (wait_event_timeout(engn->wait, !(nv_rd32(priv, 0x002284 + - (engine * 0x08)) & 0x00100000), - msecs_to_jiffies(2000)) == 0) - nv_error(priv, "runlist %d update timeout\n", engine); - mutex_unlock(&nv_subdev(priv)->mutex); -} -static int -gk104_fifo_context_attach(struct nvkm_object *parent, - struct nvkm_object *object) +void +gk104_fifo_uevent_fini(struct nvkm_fifo *fifo) { - struct nvkm_bar *bar = nvkm_bar(parent); - struct gk104_fifo_base *base = (void *)parent->parent; - struct nvkm_engctx *ectx = (void *)object; - u32 addr; - int ret; - - switch (nv_engidx(object->engine)) { - case NVDEV_ENGINE_SW : - return 0; - case NVDEV_ENGINE_CE0: - case NVDEV_ENGINE_CE1: - case NVDEV_ENGINE_CE2: - nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12; - return 0; - case NVDEV_ENGINE_GR : addr = 0x0210; break; - case NVDEV_ENGINE_MSVLD : addr = 0x0270; break; - case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break; - case 
NVDEV_ENGINE_MSPPP : addr = 0x0260; break; - default: - return -EINVAL; - } - - if (!ectx->vma.node) { - ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm, - NV_MEM_ACCESS_RW, &ectx->vma); - if (ret) - return ret; - - nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12; - } - - nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4); - nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset)); - bar->flush(bar); - return 0; + struct nvkm_device *device = fifo->engine.subdev.device; + nvkm_mask(device, 0x002140, 0x80000000, 0x00000000); } -static int -gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend, - struct nvkm_object *object) +void +gk104_fifo_uevent_init(struct nvkm_fifo *fifo) { - struct nvkm_bar *bar = nvkm_bar(parent); - struct gk104_fifo_priv *priv = (void *)parent->engine; - struct gk104_fifo_base *base = (void *)parent->parent; - struct gk104_fifo_chan *chan = (void *)parent; - u32 addr; - - switch (nv_engidx(object->engine)) { - case NVDEV_ENGINE_SW : return 0; - case NVDEV_ENGINE_CE0 : - case NVDEV_ENGINE_CE1 : - case NVDEV_ENGINE_CE2 : addr = 0x0000; break; - case NVDEV_ENGINE_GR : addr = 0x0210; break; - case NVDEV_ENGINE_MSVLD : addr = 0x0270; break; - case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break; - case NVDEV_ENGINE_MSPPP : addr = 0x0260; break; - default: - return -EINVAL; - } - - nv_wr32(priv, 0x002634, chan->base.chid); - if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) { - nv_error(priv, "channel %d [%s] kick timeout\n", - chan->base.chid, nvkm_client_name(chan)); - if (suspend) - return -EBUSY; - } - - if (addr) { - nv_wo32(base, addr + 0x00, 0x00000000); - nv_wo32(base, addr + 0x04, 0x00000000); - bar->flush(bar); - } - - return 0; + struct nvkm_device *device = fifo->engine.subdev.device; + nvkm_mask(device, 0x002140, 0x80000000, 0x80000000); } -static int -gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, - struct nvkm_oclass *oclass, void *data, u32 size, - struct nvkm_object **pobject) +void +gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine) { - union { - struct kepler_channel_gpfifo_a_v0 v0; - } *args = data; - struct nvkm_bar *bar = nvkm_bar(parent); - struct gk104_fifo_priv *priv = (void *)engine; - struct gk104_fifo_base *base = (void *)parent; + struct gk104_fifo_engn *engn = &fifo->engine[engine]; struct gk104_fifo_chan *chan; - u64 usermem, ioffset, ilength; - int ret, i; - - nv_ioctl(parent, "create channel gpfifo size %d\n", size); - if (nvif_unpack(args->v0, 0, 0, false)) { - nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x " - "ioffset %016llx ilength %08x engine %08x\n", - args->v0.version, args->v0.pushbuf, args->v0.ioffset, - args->v0.ilength, args->v0.engine); - } else - return ret; - - for (i = 0; i < FIFO_ENGINE_NR; i++) { - if (args->v0.engine & (1 << i)) { - if (nvkm_engine(parent, fifo_engine[i].subdev)) { - args->v0.engine = (1 << i); - break; - } - } - } - - if (i == FIFO_ENGINE_NR) { - nv_error(priv, "unsupported engines 0x%08x\n", args->v0.engine); - return -ENODEV; - } - - ret = nvkm_fifo_channel_create(parent, engine, oclass, 1, - priv->user.bar.offset, 0x200, - args->v0.pushbuf, - fifo_engine[i].mask, &chan); - *pobject = nv_object(chan); - if (ret) - return ret; - - args->v0.chid = chan->base.chid; - - nv_parent(chan)->context_attach = gk104_fifo_context_attach; - nv_parent(chan)->context_detach = gk104_fifo_context_detach; - chan->engine = i; - - usermem = chan->base.chid * 0x200; - ioffset = args->v0.ioffset; - ilength = 
order_base_2(args->v0.ilength / 8); - - for (i = 0; i < 0x200; i += 4) - nv_wo32(priv->user.mem, usermem + i, 0x00000000); - - nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem)); - nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem)); - nv_wo32(base, 0x10, 0x0000face); - nv_wo32(base, 0x30, 0xfffff902); - nv_wo32(base, 0x48, lower_32_bits(ioffset)); - nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16)); - nv_wo32(base, 0x84, 0x20400000); - nv_wo32(base, 0x94, 0x30000001); - nv_wo32(base, 0x9c, 0x00000100); - nv_wo32(base, 0xac, 0x0000001f); - nv_wo32(base, 0xe8, chan->base.chid); - nv_wo32(base, 0xb8, 0xf8000000); - nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */ - nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */ - bar->flush(bar); - return 0; -} - -static int -gk104_fifo_chan_init(struct nvkm_object *object) -{ - struct nvkm_gpuobj *base = nv_gpuobj(object->parent); - struct gk104_fifo_priv *priv = (void *)object->engine; - struct gk104_fifo_chan *chan = (void *)object; - u32 chid = chan->base.chid; - int ret; + struct nvkm_subdev *subdev = &fifo->base.engine.subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_memory *cur; + int nr = 0; - ret = nvkm_fifo_channel_init(&chan->base); - if (ret) - return ret; - - nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16); - nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12); - - if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) { - nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400); - gk104_fifo_runlist_update(priv, chan->engine); - nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400); - } - - return 0; -} - -static int -gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend) -{ - struct gk104_fifo_priv *priv = (void *)object->engine; - struct gk104_fifo_chan *chan = (void *)object; - u32 chid = chan->base.chid; + mutex_lock(&subdev->mutex); + cur = engn->runlist[engn->cur_runlist]; + engn->cur_runlist = !engn->cur_runlist; - if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) { - nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800); - gk104_fifo_runlist_update(priv, chan->engine); + nvkm_kmap(cur); + list_for_each_entry(chan, &engn->chan, head) { + nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid); + nvkm_wo32(cur, (nr * 8) + 4, 0x00000000); + nr++; } + nvkm_done(cur); - nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000); - return nvkm_fifo_channel_fini(&chan->base, suspend); -} - -struct nvkm_ofuncs -gk104_fifo_chan_ofuncs = { - .ctor = gk104_fifo_chan_ctor, - .dtor = _nvkm_fifo_channel_dtor, - .init = gk104_fifo_chan_init, - .fini = gk104_fifo_chan_fini, - .map = _nvkm_fifo_channel_map, - .rd32 = _nvkm_fifo_channel_rd32, - .wr32 = _nvkm_fifo_channel_wr32, - .ntfy = _nvkm_fifo_channel_ntfy -}; - -static struct nvkm_oclass -gk104_fifo_sclass[] = { - { KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs }, - {} -}; - -/******************************************************************************* - * FIFO context - instmem heap and vm setup - ******************************************************************************/ - -static int -gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine, - struct nvkm_oclass *oclass, void *data, u32 size, - struct nvkm_object **pobject) -{ - struct gk104_fifo_base *base; - int ret; - - ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000, - 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base); - *pobject = nv_object(base); - if (ret) - return ret; 
- - ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0, - &base->pgd); - if (ret) - return ret; - - nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr)); - nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr)); - nv_wo32(base, 0x0208, 0xffffffff); - nv_wo32(base, 0x020c, 0x000000ff); - - ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd); - if (ret) - return ret; - - return 0; -} - -static void -gk104_fifo_context_dtor(struct nvkm_object *object) -{ - struct gk104_fifo_base *base = (void *)object; - nvkm_vm_ref(NULL, &base->vm, base->pgd); - nvkm_gpuobj_ref(NULL, &base->pgd); - nvkm_fifo_context_destroy(&base->base); -} - -static struct nvkm_oclass -gk104_fifo_cclass = { - .handle = NV_ENGCTX(FIFO, 0xe0), - .ofuncs = &(struct nvkm_ofuncs) { - .ctor = gk104_fifo_context_ctor, - .dtor = gk104_fifo_context_dtor, - .init = _nvkm_fifo_context_init, - .fini = _nvkm_fifo_context_fini, - .rd32 = _nvkm_fifo_context_rd32, - .wr32 = _nvkm_fifo_context_wr32, - }, -}; + nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12); + nvkm_wr32(device, 0x002274, (engine << 20) | nr); -/******************************************************************************* - * PFIFO engine - ******************************************************************************/ - -static inline int -gk104_fifo_engidx(struct gk104_fifo_priv *priv, u32 engn) -{ - switch (engn) { - case NVDEV_ENGINE_GR : - case NVDEV_ENGINE_CE2 : engn = 0; break; - case NVDEV_ENGINE_MSVLD : engn = 1; break; - case NVDEV_ENGINE_MSPPP : engn = 2; break; - case NVDEV_ENGINE_MSPDEC: engn = 3; break; - case NVDEV_ENGINE_CE0 : engn = 4; break; - case NVDEV_ENGINE_CE1 : engn = 5; break; - case NVDEV_ENGINE_MSENC : engn = 6; break; - default: - return -1; - } - - return engn; + if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 + + (engine * 0x08)) & 0x00100000), + msecs_to_jiffies(2000)) == 0) + nvkm_error(subdev, "runlist %d update timeout\n", engine); + mutex_unlock(&subdev->mutex); } static inline struct nvkm_engine * -gk104_fifo_engine(struct gk104_fifo_priv *priv, u32 engn) +gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn) { - if (engn >= ARRAY_SIZE(fifo_engine)) - return NULL; - return nvkm_engine(priv, fifo_engine[engn].subdev); + struct nvkm_device *device = fifo->base.engine.subdev.device; + u64 subdevs = gk104_fifo_engine_subdev(engn); + if (subdevs) + return nvkm_device_engine(device, __ffs(subdevs)); + return NULL; } static void gk104_fifo_recover_work(struct work_struct *work) { - struct gk104_fifo_priv *priv = container_of(work, typeof(*priv), fault); - struct nvkm_object *engine; + struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault); + struct nvkm_device *device = fifo->base.engine.subdev.device; + struct nvkm_engine *engine; unsigned long flags; u32 engn, engm = 0; u64 mask, todo; - spin_lock_irqsave(&priv->base.lock, flags); - mask = priv->mask; - priv->mask = 0ULL; - spin_unlock_irqrestore(&priv->base.lock, flags); + spin_lock_irqsave(&fifo->base.lock, flags); + mask = fifo->mask; + fifo->mask = 0ULL; + spin_unlock_irqrestore(&fifo->base.lock, flags); for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) - engm |= 1 << gk104_fifo_engidx(priv, engn); - nv_mask(priv, 0x002630, engm, engm); + engm |= 1 << gk104_fifo_subdev_engine(engn); + nvkm_mask(device, 0x002630, engm, engm); for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) { - if ((engine = (void *)nvkm_engine(priv, engn))) { - nv_ofuncs(engine)->fini(engine, false); - 
WARN_ON(nv_ofuncs(engine)->init(engine)); + if ((engine = nvkm_device_engine(device, engn))) { + nvkm_subdev_fini(&engine->subdev, false); + WARN_ON(nvkm_subdev_init(&engine->subdev)); } - gk104_fifo_runlist_update(priv, gk104_fifo_engidx(priv, engn)); + gk104_fifo_runlist_update(fifo, gk104_fifo_subdev_engine(engn)); } - nv_wr32(priv, 0x00262c, engm); - nv_mask(priv, 0x002630, engm, 0x00000000); + nvkm_wr32(device, 0x00262c, engm); + nvkm_mask(device, 0x002630, engm, 0x00000000); } static void -gk104_fifo_recover(struct gk104_fifo_priv *priv, struct nvkm_engine *engine, +gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine, struct gk104_fifo_chan *chan) { + struct nvkm_subdev *subdev = &fifo->base.engine.subdev; + struct nvkm_device *device = subdev->device; u32 chid = chan->base.chid; - unsigned long flags; - nv_error(priv, "%s engine fault on channel %d, recovering...\n", - nv_subdev(engine)->name, chid); + nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n", + nvkm_subdev_name[engine->subdev.index], chid); + assert_spin_locked(&fifo->base.lock); - nv_mask(priv, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800); - chan->state = KILLED; + nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800); + list_del_init(&chan->head); + chan->killed = true; - spin_lock_irqsave(&priv->base.lock, flags); - priv->mask |= 1ULL << nv_engidx(engine); - spin_unlock_irqrestore(&priv->base.lock, flags); - schedule_work(&priv->fault); -} - -static int -gk104_fifo_swmthd(struct gk104_fifo_priv *priv, u32 chid, u32 mthd, u32 data) -{ - struct gk104_fifo_chan *chan = NULL; - struct nvkm_handle *bind; - unsigned long flags; - int ret = -EINVAL; - - spin_lock_irqsave(&priv->base.lock, flags); - if (likely(chid >= priv->base.min && chid <= priv->base.max)) - chan = (void *)priv->base.channel[chid]; - if (unlikely(!chan)) - goto out; - - bind = nvkm_namedb_get_class(nv_namedb(chan), 0x906e); - if (likely(bind)) { - if (!mthd || !nv_call(bind->object, mthd, data)) - ret = 0; - nvkm_namedb_put(bind); - } - -out: - spin_unlock_irqrestore(&priv->base.lock, flags); - return ret; + fifo->mask |= 1ULL << engine->subdev.index; + schedule_work(&fifo->fault); } static const struct nvkm_enum @@ -516,18 +151,16 @@ gk104_fifo_bind_reason[] = { }; static void -gk104_fifo_intr_bind(struct gk104_fifo_priv *priv) +gk104_fifo_intr_bind(struct gk104_fifo *fifo) { - u32 intr = nv_rd32(priv, 0x00252c); + struct nvkm_subdev *subdev = &fifo->base.engine.subdev; + struct nvkm_device *device = subdev->device; + u32 intr = nvkm_rd32(device, 0x00252c); u32 code = intr & 0x000000ff; - const struct nvkm_enum *en; - char enunk[6] = ""; - - en = nvkm_enum_find(gk104_fifo_bind_reason, code); - if (!en) - snprintf(enunk, sizeof(enunk), "UNK%02x", code); + const struct nvkm_enum *en = + nvkm_enum_find(gk104_fifo_bind_reason, code); - nv_error(priv, "BIND_ERROR [ %s ]\n", en ? en->name : enunk); + nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? 
en->name : ""); } static const struct nvkm_enum @@ -537,14 +170,17 @@ gk104_fifo_sched_reason[] = { }; static void -gk104_fifo_intr_sched_ctxsw(struct gk104_fifo_priv *priv) +gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo) { + struct nvkm_device *device = fifo->base.engine.subdev.device; struct nvkm_engine *engine; struct gk104_fifo_chan *chan; + unsigned long flags; u32 engn; - for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) { - u32 stat = nv_rd32(priv, 0x002640 + (engn * 0x04)); + spin_lock_irqsave(&fifo->base.lock, flags); + for (engn = 0; engn < ARRAY_SIZE(fifo->engine); engn++) { + u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04)); u32 busy = (stat & 0x80000000); u32 next = (stat & 0x07ff0000) >> 16; u32 chsw = (stat & 0x00008000); @@ -555,32 +191,35 @@ gk104_fifo_intr_sched_ctxsw(struct gk104_fifo_priv *priv) (void)save; if (busy && chsw) { - if (!(chan = (void *)priv->base.channel[chid])) - continue; - if (!(engine = gk104_fifo_engine(priv, engn))) - continue; - gk104_fifo_recover(priv, engine, chan); + list_for_each_entry(chan, &fifo->engine[engn].chan, head) { + if (chan->base.chid == chid) { + engine = gk104_fifo_engine(fifo, engn); + if (!engine) + break; + gk104_fifo_recover(fifo, engine, chan); + break; + } + } } } + spin_unlock_irqrestore(&fifo->base.lock, flags); } static void -gk104_fifo_intr_sched(struct gk104_fifo_priv *priv) +gk104_fifo_intr_sched(struct gk104_fifo *fifo) { - u32 intr = nv_rd32(priv, 0x00254c); + struct nvkm_subdev *subdev = &fifo->base.engine.subdev; + struct nvkm_device *device = subdev->device; + u32 intr = nvkm_rd32(device, 0x00254c); u32 code = intr & 0x000000ff; - const struct nvkm_enum *en; - char enunk[6] = ""; - - en = nvkm_enum_find(gk104_fifo_sched_reason, code); - if (!en) - snprintf(enunk, sizeof(enunk), "UNK%02x", code); + const struct nvkm_enum *en = + nvkm_enum_find(gk104_fifo_sched_reason, code); - nv_error(priv, "SCHED_ERROR [ %s ]\n", en ? en->name : enunk); + nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? 
en->name : ""); switch (code) { case 0x0a: - gk104_fifo_intr_sched_ctxsw(priv); + gk104_fifo_intr_sched_ctxsw(fifo); break; default: break; @@ -588,38 +227,42 @@ gk104_fifo_intr_sched(struct gk104_fifo_priv *priv) } static void -gk104_fifo_intr_chsw(struct gk104_fifo_priv *priv) +gk104_fifo_intr_chsw(struct gk104_fifo *fifo) { - u32 stat = nv_rd32(priv, 0x00256c); - nv_error(priv, "CHSW_ERROR 0x%08x\n", stat); - nv_wr32(priv, 0x00256c, stat); + struct nvkm_subdev *subdev = &fifo->base.engine.subdev; + struct nvkm_device *device = subdev->device; + u32 stat = nvkm_rd32(device, 0x00256c); + nvkm_error(subdev, "CHSW_ERROR %08x\n", stat); + nvkm_wr32(device, 0x00256c, stat); } static void -gk104_fifo_intr_dropped_fault(struct gk104_fifo_priv *priv) +gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo) { - u32 stat = nv_rd32(priv, 0x00259c); - nv_error(priv, "DROPPED_MMU_FAULT 0x%08x\n", stat); + struct nvkm_subdev *subdev = &fifo->base.engine.subdev; + struct nvkm_device *device = subdev->device; + u32 stat = nvkm_rd32(device, 0x00259c); + nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat); } static const struct nvkm_enum gk104_fifo_fault_engine[] = { - { 0x00, "GR", NULL, NVDEV_ENGINE_GR }, - { 0x03, "IFB", NULL, NVDEV_ENGINE_IFB }, - { 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR }, - { 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM }, - { 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO }, - { 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO }, - { 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO }, - { 0x10, "MSVLD", NULL, NVDEV_ENGINE_MSVLD }, - { 0x11, "MSPPP", NULL, NVDEV_ENGINE_MSPPP }, + { 0x00, "GR", NULL, NVKM_ENGINE_GR }, + { 0x03, "IFB", NULL, NVKM_ENGINE_IFB }, + { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR }, + { 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM }, + { 0x07, "PBDMA0", NULL, NVKM_ENGINE_FIFO }, + { 0x08, "PBDMA1", NULL, NVKM_ENGINE_FIFO }, + { 0x09, "PBDMA2", NULL, NVKM_ENGINE_FIFO }, + { 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD }, + { 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP }, { 0x13, "PERF" }, - { 0x14, "MSPDEC", NULL, NVDEV_ENGINE_MSPDEC }, - { 0x15, "CE0", NULL, NVDEV_ENGINE_CE0 }, - { 0x16, "CE1", NULL, NVDEV_ENGINE_CE1 }, + { 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC }, + { 0x15, "CE0", NULL, NVKM_ENGINE_CE0 }, + { 0x16, "CE1", NULL, NVKM_ENGINE_CE1 }, { 0x17, "PMU" }, - { 0x19, "MSENC", NULL, NVDEV_ENGINE_MSENC }, - { 0x1b, "CE2", NULL, NVDEV_ENGINE_CE2 }, + { 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC }, + { 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 }, {} }; @@ -708,80 +351,65 @@ gk104_fifo_fault_gpcclient[] = { }; static void -gk104_fifo_intr_fault(struct gk104_fifo_priv *priv, int unit) +gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit) { - u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10)); - u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10)); - u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10)); - u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10)); + struct nvkm_subdev *subdev = &fifo->base.engine.subdev; + struct nvkm_device *device = subdev->device; + u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10)); + u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10)); + u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10)); + u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10)); u32 gpc = (stat & 0x1f000000) >> 24; u32 client = (stat & 0x00001f00) >> 8; u32 write = (stat & 0x00000080); u32 hub = (stat & 0x00000040); u32 reason = (stat & 0x0000000f); - struct nvkm_object *engctx = NULL, *object; - struct nvkm_engine *engine = NULL; const struct nvkm_enum *er, *eu, *ec; - char erunk[6] = ""; - char euunk[6] = ""; 
- char ecunk[6] = ""; - char gpcid[3] = ""; + struct nvkm_engine *engine = NULL; + struct nvkm_fifo_chan *chan; + unsigned long flags; + char gpcid[8] = ""; er = nvkm_enum_find(gk104_fifo_fault_reason, reason); - if (!er) - snprintf(erunk, sizeof(erunk), "UNK%02X", reason); - eu = nvkm_enum_find(gk104_fifo_fault_engine, unit); + if (hub) { + ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client); + } else { + ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client); + snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc); + } + if (eu) { switch (eu->data2) { - case NVDEV_SUBDEV_BAR: - nv_mask(priv, 0x001704, 0x00000000, 0x00000000); + case NVKM_SUBDEV_BAR: + nvkm_mask(device, 0x001704, 0x00000000, 0x00000000); break; - case NVDEV_SUBDEV_INSTMEM: - nv_mask(priv, 0x001714, 0x00000000, 0x00000000); + case NVKM_SUBDEV_INSTMEM: + nvkm_mask(device, 0x001714, 0x00000000, 0x00000000); break; - case NVDEV_ENGINE_IFB: - nv_mask(priv, 0x001718, 0x00000000, 0x00000000); + case NVKM_ENGINE_IFB: + nvkm_mask(device, 0x001718, 0x00000000, 0x00000000); break; default: - engine = nvkm_engine(priv, eu->data2); - if (engine) - engctx = nvkm_engctx_get(engine, inst); + engine = nvkm_device_engine(device, eu->data2); break; } - } else { - snprintf(euunk, sizeof(euunk), "UNK%02x", unit); } - if (hub) { - ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client); - } else { - ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client); - snprintf(gpcid, sizeof(gpcid), "%d", gpc); - } + chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags); - if (!ec) - snprintf(ecunk, sizeof(ecunk), "UNK%02x", client); - - nv_error(priv, "%s fault at 0x%010llx [%s] from %s/%s%s%s%s on " - "channel 0x%010llx [%s]\n", write ? "write" : "read", - (u64)vahi << 32 | valo, er ? er->name : erunk, - eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? "" : "/", - ec ? ec->name : ecunk, (u64)inst << 12, - nvkm_client_name(engctx)); - - object = engctx; - while (object) { - switch (nv_mclass(object)) { - case KEPLER_CHANNEL_GPFIFO_A: - case MAXWELL_CHANNEL_GPFIFO_A: - gk104_fifo_recover(priv, engine, (void *)object); - break; - } - object = object->parent; - } + nvkm_error(subdev, + "%s fault at %010llx engine %02x [%s] client %02x [%s%s] " + "reason %02x [%s] on channel %d [%010llx %s]\n", + write ? "write" : "read", (u64)vahi << 32 | valo, + unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "", + reason, er ? er->name : "", chan ? chan->chid : -1, + (u64)inst << 12, + chan ? 
chan->object.client->name : "unknown"); - nvkm_engctx_put(engctx); + if (engine && chan) + gk104_fifo_recover(fifo, engine, (void *)chan); + nvkm_fifo_chan_put(&fifo->base, flags, &chan); } static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = { @@ -819,35 +447,42 @@ static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = { }; static void -gk104_fifo_intr_pbdma_0(struct gk104_fifo_priv *priv, int unit) +gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit) { - u32 mask = nv_rd32(priv, 0x04010c + (unit * 0x2000)); - u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000)) & mask; - u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000)); - u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000)); - u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff; + struct nvkm_subdev *subdev = &fifo->base.engine.subdev; + struct nvkm_device *device = subdev->device; + u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000)); + u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask; + u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000)); + u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000)); + u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff; u32 subc = (addr & 0x00070000) >> 16; u32 mthd = (addr & 0x00003ffc); u32 show = stat; + struct nvkm_fifo_chan *chan; + unsigned long flags; + char msg[128]; if (stat & 0x00800000) { - if (!gk104_fifo_swmthd(priv, chid, mthd, data)) - show &= ~0x00800000; - nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008); + if (device->sw) { + if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data)) + show &= ~0x00800000; + } + nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008); } if (show) { - nv_error(priv, "PBDMA%d:", unit); - nvkm_bitfield_print(gk104_fifo_pbdma_intr_0, show); - pr_cont("\n"); - nv_error(priv, - "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n", - unit, chid, - nvkm_client_name_for_fifo_chid(&priv->base, chid), - subc, mthd, data); + nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show); + chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags); + nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] " + "subc %d mthd %04x data %08x\n", + unit, show, msg, chid, chan ? chan->inst->addr : 0, + chan ? 
chan->object.client->name : "unknown", + subc, mthd, data); + nvkm_fifo_chan_put(&fifo->base, flags, &chan); } - nv_wr32(priv, 0x040108 + (unit * 0x2000), stat); + nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat); } static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = { @@ -860,280 +495,266 @@ static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = { }; static void -gk104_fifo_intr_pbdma_1(struct gk104_fifo_priv *priv, int unit) +gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit) { - u32 mask = nv_rd32(priv, 0x04014c + (unit * 0x2000)); - u32 stat = nv_rd32(priv, 0x040148 + (unit * 0x2000)) & mask; - u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff; + struct nvkm_subdev *subdev = &fifo->base.engine.subdev; + struct nvkm_device *device = subdev->device; + u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000)); + u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask; + u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff; + char msg[128]; if (stat) { - nv_error(priv, "PBDMA%d:", unit); - nvkm_bitfield_print(gk104_fifo_pbdma_intr_1, stat); - pr_cont("\n"); - nv_error(priv, "PBDMA%d: ch %d %08x %08x\n", unit, chid, - nv_rd32(priv, 0x040150 + (unit * 0x2000)), - nv_rd32(priv, 0x040154 + (unit * 0x2000))); + nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat); + nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n", + unit, stat, msg, chid, + nvkm_rd32(device, 0x040150 + (unit * 0x2000)), + nvkm_rd32(device, 0x040154 + (unit * 0x2000))); } - nv_wr32(priv, 0x040148 + (unit * 0x2000), stat); + nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat); } static void -gk104_fifo_intr_runlist(struct gk104_fifo_priv *priv) +gk104_fifo_intr_runlist(struct gk104_fifo *fifo) { - u32 mask = nv_rd32(priv, 0x002a00); + struct nvkm_device *device = fifo->base.engine.subdev.device; + u32 mask = nvkm_rd32(device, 0x002a00); while (mask) { u32 engn = __ffs(mask); - wake_up(&priv->engine[engn].wait); - nv_wr32(priv, 0x002a00, 1 << engn); + wake_up(&fifo->engine[engn].wait); + nvkm_wr32(device, 0x002a00, 1 << engn); mask &= ~(1 << engn); } } static void -gk104_fifo_intr_engine(struct gk104_fifo_priv *priv) +gk104_fifo_intr_engine(struct gk104_fifo *fifo) { - nvkm_fifo_uevent(&priv->base); + nvkm_fifo_uevent(&fifo->base); } -static void -gk104_fifo_intr(struct nvkm_subdev *subdev) +void +gk104_fifo_intr(struct nvkm_fifo *base) { - struct gk104_fifo_priv *priv = (void *)subdev; - u32 mask = nv_rd32(priv, 0x002140); - u32 stat = nv_rd32(priv, 0x002100) & mask; + struct gk104_fifo *fifo = gk104_fifo(base); + struct nvkm_subdev *subdev = &fifo->base.engine.subdev; + struct nvkm_device *device = subdev->device; + u32 mask = nvkm_rd32(device, 0x002140); + u32 stat = nvkm_rd32(device, 0x002100) & mask; if (stat & 0x00000001) { - gk104_fifo_intr_bind(priv); - nv_wr32(priv, 0x002100, 0x00000001); + gk104_fifo_intr_bind(fifo); + nvkm_wr32(device, 0x002100, 0x00000001); stat &= ~0x00000001; } if (stat & 0x00000010) { - nv_error(priv, "PIO_ERROR\n"); - nv_wr32(priv, 0x002100, 0x00000010); + nvkm_error(subdev, "PIO_ERROR\n"); + nvkm_wr32(device, 0x002100, 0x00000010); stat &= ~0x00000010; } if (stat & 0x00000100) { - gk104_fifo_intr_sched(priv); - nv_wr32(priv, 0x002100, 0x00000100); + gk104_fifo_intr_sched(fifo); + nvkm_wr32(device, 0x002100, 0x00000100); stat &= ~0x00000100; } if (stat & 0x00010000) { - gk104_fifo_intr_chsw(priv); - nv_wr32(priv, 0x002100, 0x00010000); + gk104_fifo_intr_chsw(fifo); + nvkm_wr32(device, 0x002100, 0x00010000); stat &= 
~0x00010000; } if (stat & 0x00800000) { - nv_error(priv, "FB_FLUSH_TIMEOUT\n"); - nv_wr32(priv, 0x002100, 0x00800000); + nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n"); + nvkm_wr32(device, 0x002100, 0x00800000); stat &= ~0x00800000; } if (stat & 0x01000000) { - nv_error(priv, "LB_ERROR\n"); - nv_wr32(priv, 0x002100, 0x01000000); + nvkm_error(subdev, "LB_ERROR\n"); + nvkm_wr32(device, 0x002100, 0x01000000); stat &= ~0x01000000; } if (stat & 0x08000000) { - gk104_fifo_intr_dropped_fault(priv); - nv_wr32(priv, 0x002100, 0x08000000); + gk104_fifo_intr_dropped_fault(fifo); + nvkm_wr32(device, 0x002100, 0x08000000); stat &= ~0x08000000; } if (stat & 0x10000000) { - u32 mask = nv_rd32(priv, 0x00259c); + u32 mask = nvkm_rd32(device, 0x00259c); while (mask) { u32 unit = __ffs(mask); - gk104_fifo_intr_fault(priv, unit); - nv_wr32(priv, 0x00259c, (1 << unit)); + gk104_fifo_intr_fault(fifo, unit); + nvkm_wr32(device, 0x00259c, (1 << unit)); mask &= ~(1 << unit); } stat &= ~0x10000000; } if (stat & 0x20000000) { - u32 mask = nv_rd32(priv, 0x0025a0); + u32 mask = nvkm_rd32(device, 0x0025a0); while (mask) { u32 unit = __ffs(mask); - gk104_fifo_intr_pbdma_0(priv, unit); - gk104_fifo_intr_pbdma_1(priv, unit); - nv_wr32(priv, 0x0025a0, (1 << unit)); + gk104_fifo_intr_pbdma_0(fifo, unit); + gk104_fifo_intr_pbdma_1(fifo, unit); + nvkm_wr32(device, 0x0025a0, (1 << unit)); mask &= ~(1 << unit); } stat &= ~0x20000000; } if (stat & 0x40000000) { - gk104_fifo_intr_runlist(priv); + gk104_fifo_intr_runlist(fifo); stat &= ~0x40000000; } if (stat & 0x80000000) { - nv_wr32(priv, 0x002100, 0x80000000); - gk104_fifo_intr_engine(priv); + nvkm_wr32(device, 0x002100, 0x80000000); + gk104_fifo_intr_engine(fifo); stat &= ~0x80000000; } if (stat) { - nv_error(priv, "INTR 0x%08x\n", stat); - nv_mask(priv, 0x002140, stat, 0x00000000); - nv_wr32(priv, 0x002100, stat); + nvkm_error(subdev, "INTR %08x\n", stat); + nvkm_mask(device, 0x002140, stat, 0x00000000); + nvkm_wr32(device, 0x002100, stat); } } -static void -gk104_fifo_uevent_init(struct nvkm_event *event, int type, int index) +void +gk104_fifo_fini(struct nvkm_fifo *base) { - struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent); - nv_mask(fifo, 0x002140, 0x80000000, 0x80000000); + struct gk104_fifo *fifo = gk104_fifo(base); + struct nvkm_device *device = fifo->base.engine.subdev.device; + flush_work(&fifo->fault); + /* allow mmu fault interrupts, even when we're not using fifo */ + nvkm_mask(device, 0x002140, 0x10000000, 0x10000000); } -static void -gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index) +int +gk104_fifo_oneinit(struct nvkm_fifo *base) { - struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent); - nv_mask(fifo, 0x002140, 0x80000000, 0x00000000); -} + struct gk104_fifo *fifo = gk104_fifo(base); + struct nvkm_device *device = fifo->base.engine.subdev.device; + int ret, i; -static const struct nvkm_event_func -gk104_fifo_uevent_func = { - .ctor = nvkm_fifo_uevent_ctor, - .init = gk104_fifo_uevent_init, - .fini = gk104_fifo_uevent_fini, -}; + for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) { + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, + 0x8000, 0x1000, false, + &fifo->engine[i].runlist[0]); + if (ret) + return ret; -int -gk104_fifo_fini(struct nvkm_object *object, bool suspend) -{ - struct gk104_fifo_priv *priv = (void *)object; - int ret; + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, + 0x8000, 0x1000, false, + &fifo->engine[i].runlist[1]); + if (ret) + return ret; + + 
init_waitqueue_head(&fifo->engine[i].wait); + INIT_LIST_HEAD(&fifo->engine[i].chan); + } - ret = nvkm_fifo_fini(&priv->base, suspend); + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, + fifo->base.nr * 0x200, 0x1000, true, + &fifo->user.mem); if (ret) return ret; - /* allow mmu fault interrupts, even when we're not using fifo */ - nv_mask(priv, 0x002140, 0x10000000, 0x10000000); + ret = nvkm_bar_umap(device->bar, fifo->base.nr * 0x200, 12, + &fifo->user.bar); + if (ret) + return ret; + + nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0); return 0; } -int -gk104_fifo_init(struct nvkm_object *object) +void +gk104_fifo_init(struct nvkm_fifo *base) { - struct gk104_fifo_priv *priv = (void *)object; - int ret, i; - - ret = nvkm_fifo_init(&priv->base); - if (ret) - return ret; + struct gk104_fifo *fifo = gk104_fifo(base); + struct nvkm_subdev *subdev = &fifo->base.engine.subdev; + struct nvkm_device *device = subdev->device; + int i; /* enable all available PBDMA units */ - nv_wr32(priv, 0x000204, 0xffffffff); - priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204)); - nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr); + nvkm_wr32(device, 0x000204, 0xffffffff); + fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204)); + nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr); /* PBDMA[n] */ - for (i = 0; i < priv->spoon_nr; i++) { - nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000); - nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */ - nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */ + for (i = 0; i < fifo->spoon_nr; i++) { + nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000); + nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */ + nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */ } /* PBDMA[n].HCE */ - for (i = 0; i < priv->spoon_nr; i++) { - nv_wr32(priv, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */ - nv_wr32(priv, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */ + for (i = 0; i < fifo->spoon_nr; i++) { + nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */ + nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */ } - nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12); + nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12); - nv_wr32(priv, 0x002100, 0xffffffff); - nv_wr32(priv, 0x002140, 0x7fffffff); - return 0; + nvkm_wr32(device, 0x002100, 0xffffffff); + nvkm_wr32(device, 0x002140, 0x7fffffff); } -void -gk104_fifo_dtor(struct nvkm_object *object) +void * +gk104_fifo_dtor(struct nvkm_fifo *base) { - struct gk104_fifo_priv *priv = (void *)object; + struct gk104_fifo *fifo = gk104_fifo(base); int i; - nvkm_gpuobj_unmap(&priv->user.bar); - nvkm_gpuobj_ref(NULL, &priv->user.mem); + nvkm_vm_put(&fifo->user.bar); + nvkm_memory_del(&fifo->user.mem); - for (i = 0; i < FIFO_ENGINE_NR; i++) { - nvkm_gpuobj_ref(NULL, &priv->engine[i].runlist[1]); - nvkm_gpuobj_ref(NULL, &priv->engine[i].runlist[0]); + for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) { + nvkm_memory_del(&fifo->engine[i].runlist[1]); + nvkm_memory_del(&fifo->engine[i].runlist[0]); } - nvkm_fifo_destroy(&priv->base); + return fifo; } int -gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, - struct nvkm_oclass *oclass, void *data, u32 size, - struct nvkm_object **pobject) +gk104_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device, + int index, int nr, struct nvkm_fifo **pfifo) { - struct gk104_fifo_impl *impl = (void *)oclass; - struct 
gk104_fifo_priv *priv; - int ret, i; + struct gk104_fifo *fifo; - ret = nvkm_fifo_create(parent, engine, oclass, 0, - impl->channels - 1, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - INIT_WORK(&priv->fault, gk104_fifo_recover_work); - - for (i = 0; i < FIFO_ENGINE_NR; i++) { - ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000, - 0, &priv->engine[i].runlist[0]); - if (ret) - return ret; - - ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000, - 0, &priv->engine[i].runlist[1]); - if (ret) - return ret; - - init_waitqueue_head(&priv->engine[i].wait); - } - - ret = nvkm_gpuobj_new(nv_object(priv), NULL, impl->channels * 0x200, - 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem); - if (ret) - return ret; + if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL))) + return -ENOMEM; + INIT_WORK(&fifo->fault, gk104_fifo_recover_work); + *pfifo = &fifo->base; - ret = nvkm_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW, - &priv->user.bar); - if (ret) - return ret; - - ret = nvkm_event_init(&gk104_fifo_uevent_func, 1, 1, &priv->base.uevent); - if (ret) - return ret; - - nv_subdev(priv)->unit = 0x00000100; - nv_subdev(priv)->intr = gk104_fifo_intr; - nv_engine(priv)->cclass = &gk104_fifo_cclass; - nv_engine(priv)->sclass = gk104_fifo_sclass; - return 0; + return nvkm_fifo_ctor(func, device, index, nr, &fifo->base); } -struct nvkm_oclass * -gk104_fifo_oclass = &(struct gk104_fifo_impl) { - .base.handle = NV_ENGINE(FIFO, 0xe0), - .base.ofuncs = &(struct nvkm_ofuncs) { - .ctor = gk104_fifo_ctor, - .dtor = gk104_fifo_dtor, - .init = gk104_fifo_init, - .fini = gk104_fifo_fini, +static const struct nvkm_fifo_func +gk104_fifo = { + .dtor = gk104_fifo_dtor, + .oneinit = gk104_fifo_oneinit, + .init = gk104_fifo_init, + .fini = gk104_fifo_fini, + .intr = gk104_fifo_intr, + .uevent_init = gk104_fifo_uevent_init, + .uevent_fini = gk104_fifo_uevent_fini, + .chan = { + &gk104_fifo_gpfifo_oclass, + NULL }, - .channels = 4096, -}.base; +}; + +int +gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) +{ + return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo); +} |