From e09b41010ba33a20a87472ee821fa407a5b8da36 Mon Sep 17 00:00:00 2001
From: José Pekkarinen
Date: Mon, 11 Apr 2016 10:41:07 +0300
Subject: These changes are the raw update to linux-4.4.6-rt14.
 Kernel sources are taken from kernel.org, and the rt patch from the rt wiki
 download page.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

During the rebase, the following patch collided:
"Force tick interrupt and get rid of softirq magic" (I70131fb85).
The conflict has been resolved by dropping that patch, since its logic was
already present in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen
---
 kernel/drivers/gpu/drm/nouveau/nvkm/core/Kbuild   |   7 +-
 kernel/drivers/gpu/drm/nouveau/nvkm/core/client.c | 188 ++++++----
 kernel/drivers/gpu/drm/nouveau/nvkm/core/engctx.c | 239 -------------
 kernel/drivers/gpu/drm/nouveau/nvkm/core/engine.c | 154 +++++++--
 kernel/drivers/gpu/drm/nouveau/nvkm/core/enum.c   |  28 +-
 kernel/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c | 379 +++++++++-----------
 kernel/drivers/gpu/drm/nouveau/nvkm/core/handle.c | 221 ------------
 kernel/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c  | 395 +++++++++------------
 kernel/drivers/gpu/drm/nouveau/nvkm/core/memory.c |  64 ++++
 kernel/drivers/gpu/drm/nouveau/nvkm/core/mm.c     |   2 +-
 kernel/drivers/gpu/drm/nouveau/nvkm/core/namedb.c | 199 -----------
 kernel/drivers/gpu/drm/nouveau/nvkm/core/object.c | 400 +++++++++-------------
 kernel/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c | 200 +++++++++++
 kernel/drivers/gpu/drm/nouveau/nvkm/core/option.c |  20 +-
 kernel/drivers/gpu/drm/nouveau/nvkm/core/parent.c | 159 ---------
 kernel/drivers/gpu/drm/nouveau/nvkm/core/printk.c | 103 ------
 kernel/drivers/gpu/drm/nouveau/nvkm/core/ramht.c  | 144 +++++---
 kernel/drivers/gpu/drm/nouveau/nvkm/core/subdev.c | 208 +++++++----
 18 files changed, 1261 insertions(+), 1849 deletions(-)
 delete mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/core/engctx.c
 delete mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/core/handle.c
 create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/core/memory.c
 delete mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/core/namedb.c
 create mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
 delete mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/core/parent.c
 delete mode 100644 kernel/drivers/gpu/drm/nouveau/nvkm/core/printk.c

(limited to 'kernel/drivers/gpu/drm/nouveau/nvkm/core')

diff --git a/kernel/drivers/gpu/drm/nouveau/nvkm/core/Kbuild b/kernel/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
index a2bdb2069..7f66963f3 100644
--- a/kernel/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
+++ b/kernel/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
@@ -1,17 +1,14 @@
 nvkm-y := nvkm/core/client.o
-nvkm-y += nvkm/core/engctx.o
 nvkm-y += nvkm/core/engine.o
 nvkm-y += nvkm/core/enum.o
 nvkm-y += nvkm/core/event.o
 nvkm-y += nvkm/core/gpuobj.o
-nvkm-y += nvkm/core/handle.o
 nvkm-y += nvkm/core/ioctl.o
+nvkm-y += nvkm/core/memory.o
 nvkm-y += nvkm/core/mm.o
-nvkm-y += nvkm/core/namedb.o
 nvkm-y += nvkm/core/notify.o
 nvkm-y += nvkm/core/object.o
+nvkm-y += nvkm/core/oproxy.o
 nvkm-y += nvkm/core/option.o
-nvkm-y += nvkm/core/parent.o
-nvkm-y += nvkm/core/printk.o
 nvkm-y += nvkm/core/ramht.o
 nvkm-y += nvkm/core/subdev.o
diff --git a/kernel/drivers/gpu/drm/nouveau/nvkm/core/client.c b/kernel/drivers/gpu/drm/nouveau/nvkm/core/client.c
index 878a82f8f..297e1e953 100644
--- a/kernel/drivers/gpu/drm/nouveau/nvkm/core/client.c
+++ b/kernel/drivers/gpu/drm/nouveau/nvkm/core/client.c
@@ -23,7 +23,6 @@
  */
 #include
 #include
-#include #include #include @@ -91,7 +90,7 @@ int nvkm_client_notify_new(struct nvkm_object *object, struct nvkm_event *event, void *data, u32 size) { - struct nvkm_client *client = nvkm_client(object); + struct nvkm_client *client = object->client; struct nvkm_client_notify *notify; union { struct nvif_notify_req_v0 v0; @@ -111,11 +110,11 @@ nvkm_client_notify_new(struct nvkm_object *object, if (!notify) return -ENOMEM; - nv_ioctl(client, "notify new size %d\n", size); + nvif_ioctl(object, "notify new size %d\n", size); if (nvif_unpack(req->v0, 0, 0, true)) { - nv_ioctl(client, "notify new vers %d reply %d route %02x " - "token %llx\n", req->v0.version, - req->v0.reply, req->v0.route, req->v0.token); + nvif_ioctl(object, "notify new vers %d reply %d route %02x " + "token %llx\n", req->v0.version, + req->v0.reply, req->v0.route, req->v0.token); notify->version = req->v0.version; notify->size = sizeof(notify->rep.v0); notify->rep.v0.version = req->v0.version; @@ -146,10 +145,10 @@ nvkm_client_mthd_devlist(struct nvkm_object *object, void *data, u32 size) } *args = data; int ret; - nv_ioctl(object, "client devlist size %d\n", size); + nvif_ioctl(object, "client devlist size %d\n", size); if (nvif_unpack(args->v0, 0, 0, true)) { - nv_ioctl(object, "client devlist vers %d count %d\n", - args->v0.version, args->v0.count); + nvif_ioctl(object, "client devlist vers %d count %d\n", + args->v0.version, args->v0.count); if (size == sizeof(args->v0.device[0]) * args->v0.count) { ret = nvkm_device_list(args->v0.device, args->v0.count); if (ret >= 0) { @@ -176,91 +175,134 @@ nvkm_client_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size) return -EINVAL; } -static void -nvkm_client_dtor(struct nvkm_object *object) +static int +nvkm_client_child_new(const struct nvkm_oclass *oclass, + void *data, u32 size, struct nvkm_object **pobject) { - struct nvkm_client *client = (void *)object; - int i; - for (i = 0; i < ARRAY_SIZE(client->notify); i++) - nvkm_client_notify_del(client, i); - nvkm_object_ref(NULL, &client->device); - nvkm_handle_destroy(client->root); - nvkm_namedb_destroy(&client->namedb); + return oclass->base.ctor(oclass, data, size, pobject); } -static struct nvkm_oclass -nvkm_client_oclass = { - .ofuncs = &(struct nvkm_ofuncs) { - .dtor = nvkm_client_dtor, - .mthd = nvkm_client_mthd, - }, -}; - -int -nvkm_client_create_(const char *name, u64 devname, const char *cfg, - const char *dbg, int length, void **pobject) +static int +nvkm_client_child_get(struct nvkm_object *object, int index, + struct nvkm_oclass *oclass) { - struct nvkm_object *device; - struct nvkm_client *client; - int ret; + const struct nvkm_sclass *sclass; + + switch (index) { + case 0: sclass = &nvkm_udevice_sclass; break; + default: + return -EINVAL; + } - device = (void *)nvkm_device_find(devname); - if (!device) - return -ENODEV; + oclass->ctor = nvkm_client_child_new; + oclass->base = *sclass; + return 0; +} + +static const struct nvkm_object_func +nvkm_client_object_func = { + .mthd = nvkm_client_mthd, + .sclass = nvkm_client_child_get, +}; - ret = nvkm_namedb_create_(NULL, NULL, &nvkm_client_oclass, - NV_CLIENT_CLASS, NULL, - (1ULL << NVDEV_ENGINE_DEVICE), - length, pobject); - client = *pobject; - if (ret) - return ret; +void +nvkm_client_remove(struct nvkm_client *client, struct nvkm_object *object) +{ + if (!RB_EMPTY_NODE(&object->node)) + rb_erase(&object->node, &client->objroot); +} - ret = nvkm_handle_create(nv_object(client), ~0, ~0, nv_object(client), - &client->root); - if (ret) - return ret; +bool 
+nvkm_client_insert(struct nvkm_client *client, struct nvkm_object *object) +{ + struct rb_node **ptr = &client->objroot.rb_node; + struct rb_node *parent = NULL; - /* prevent init/fini being called, os in in charge of this */ - atomic_set(&nv_object(client)->usecount, 2); + while (*ptr) { + struct nvkm_object *this = + container_of(*ptr, typeof(*this), node); + parent = *ptr; + if (object->object < this->object) + ptr = &parent->rb_left; + else + if (object->object > this->object) + ptr = &parent->rb_right; + else + return false; + } - nvkm_object_ref(device, &client->device); - snprintf(client->name, sizeof(client->name), "%s", name); - client->debug = nvkm_dbgopt(dbg, "CLIENT"); - return 0; + rb_link_node(&object->node, parent, ptr); + rb_insert_color(&object->node, &client->objroot); + return true; } -int -nvkm_client_init(struct nvkm_client *client) +struct nvkm_object * +nvkm_client_search(struct nvkm_client *client, u64 handle) { - int ret; - nv_debug(client, "init running\n"); - ret = nvkm_handle_init(client->root); - nv_debug(client, "init completed with %d\n", ret); - return ret; + struct rb_node *node = client->objroot.rb_node; + while (node) { + struct nvkm_object *object = + container_of(node, typeof(*object), node); + if (handle < object->object) + node = node->rb_left; + else + if (handle > object->object) + node = node->rb_right; + else + return object; + } + return NULL; } int nvkm_client_fini(struct nvkm_client *client, bool suspend) { + struct nvkm_object *object = &client->object; const char *name[2] = { "fini", "suspend" }; - int ret, i; - nv_debug(client, "%s running\n", name[suspend]); - nv_debug(client, "%s notify\n", name[suspend]); + int i; + nvif_debug(object, "%s notify\n", name[suspend]); for (i = 0; i < ARRAY_SIZE(client->notify); i++) nvkm_client_notify_put(client, i); - nv_debug(client, "%s object\n", name[suspend]); - ret = nvkm_handle_fini(client->root, suspend); - nv_debug(client, "%s completed with %d\n", name[suspend], ret); - return ret; + return nvkm_object_fini(&client->object, suspend); +} + +int +nvkm_client_init(struct nvkm_client *client) +{ + return nvkm_object_init(&client->object); +} + +void +nvkm_client_del(struct nvkm_client **pclient) +{ + struct nvkm_client *client = *pclient; + int i; + if (client) { + nvkm_client_fini(client, false); + for (i = 0; i < ARRAY_SIZE(client->notify); i++) + nvkm_client_notify_del(client, i); + nvkm_object_dtor(&client->object); + kfree(*pclient); + *pclient = NULL; + } } -const char * -nvkm_client_name(void *obj) +int +nvkm_client_new(const char *name, u64 device, const char *cfg, + const char *dbg, struct nvkm_client **pclient) { - const char *client_name = "unknown"; - struct nvkm_client *client = nvkm_client(obj); - if (client) - client_name = client->name; - return client_name; + struct nvkm_oclass oclass = {}; + struct nvkm_client *client; + + if (!(client = *pclient = kzalloc(sizeof(*client), GFP_KERNEL))) + return -ENOMEM; + oclass.client = client; + + nvkm_object_ctor(&nvkm_client_object_func, &oclass, &client->object); + snprintf(client->name, sizeof(client->name), "%s", name); + client->device = device; + client->debug = nvkm_dbgopt(dbg, "CLIENT"); + client->objroot = RB_ROOT; + client->dmaroot = RB_ROOT; + return 0; } diff --git a/kernel/drivers/gpu/drm/nouveau/nvkm/core/engctx.c b/kernel/drivers/gpu/drm/nouveau/nvkm/core/engctx.c deleted file mode 100644 index fb2acbca7..000000000 --- a/kernel/drivers/gpu/drm/nouveau/nvkm/core/engctx.c +++ /dev/null @@ -1,239 +0,0 @@ -/* - * Copyright 2012 Red 
Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ -#include -#include -#include - -static inline int -nvkm_engctx_exists(struct nvkm_object *parent, - struct nvkm_engine *engine, void **pobject) -{ - struct nvkm_engctx *engctx; - struct nvkm_object *parctx; - - list_for_each_entry(engctx, &engine->contexts, head) { - parctx = nv_pclass(nv_object(engctx), NV_PARENT_CLASS); - if (parctx == parent) { - atomic_inc(&nv_object(engctx)->refcount); - *pobject = engctx; - return 1; - } - } - - return 0; -} - -int -nvkm_engctx_create_(struct nvkm_object *parent, struct nvkm_object *engobj, - struct nvkm_oclass *oclass, struct nvkm_object *pargpu, - u32 size, u32 align, u32 flags, int length, void **pobject) -{ - struct nvkm_client *client = nvkm_client(parent); - struct nvkm_engine *engine = nv_engine(engobj); - struct nvkm_object *engctx; - unsigned long save; - int ret; - - /* check if this engine already has a context for the parent object, - * and reference it instead of creating a new one - */ - spin_lock_irqsave(&engine->lock, save); - ret = nvkm_engctx_exists(parent, engine, pobject); - spin_unlock_irqrestore(&engine->lock, save); - if (ret) - return ret; - - /* create the new context, supports creating both raw objects and - * objects backed by instance memory - */ - if (size) { - ret = nvkm_gpuobj_create_(parent, engobj, oclass, - NV_ENGCTX_CLASS, pargpu, size, - align, flags, length, pobject); - } else { - ret = nvkm_object_create_(parent, engobj, oclass, - NV_ENGCTX_CLASS, length, pobject); - } - - engctx = *pobject; - if (ret) - return ret; - - /* must take the lock again and re-check a context doesn't already - * exist (in case of a race) - the lock had to be dropped before as - * it's not possible to allocate the object with it held. 
- */ - spin_lock_irqsave(&engine->lock, save); - ret = nvkm_engctx_exists(parent, engine, pobject); - if (ret) { - spin_unlock_irqrestore(&engine->lock, save); - nvkm_object_ref(NULL, &engctx); - return ret; - } - - if (client->vm) - atomic_inc(&client->vm->engref[nv_engidx(engine)]); - list_add(&nv_engctx(engctx)->head, &engine->contexts); - nv_engctx(engctx)->addr = ~0ULL; - spin_unlock_irqrestore(&engine->lock, save); - return 0; -} - -void -nvkm_engctx_destroy(struct nvkm_engctx *engctx) -{ - struct nvkm_engine *engine = engctx->gpuobj.object.engine; - struct nvkm_client *client = nvkm_client(engctx); - unsigned long save; - - nvkm_gpuobj_unmap(&engctx->vma); - spin_lock_irqsave(&engine->lock, save); - list_del(&engctx->head); - spin_unlock_irqrestore(&engine->lock, save); - - if (client->vm) - atomic_dec(&client->vm->engref[nv_engidx(engine)]); - - if (engctx->gpuobj.size) - nvkm_gpuobj_destroy(&engctx->gpuobj); - else - nvkm_object_destroy(&engctx->gpuobj.object); -} - -int -nvkm_engctx_init(struct nvkm_engctx *engctx) -{ - struct nvkm_object *object = nv_object(engctx); - struct nvkm_subdev *subdev = nv_subdev(object->engine); - struct nvkm_object *parent; - struct nvkm_subdev *pardev; - int ret; - - ret = nvkm_gpuobj_init(&engctx->gpuobj); - if (ret) - return ret; - - parent = nv_pclass(object->parent, NV_PARENT_CLASS); - pardev = nv_subdev(parent->engine); - if (nv_parent(parent)->context_attach) { - mutex_lock(&pardev->mutex); - ret = nv_parent(parent)->context_attach(parent, object); - mutex_unlock(&pardev->mutex); - } - - if (ret) { - nv_error(parent, "failed to attach %s context, %d\n", - subdev->name, ret); - return ret; - } - - nv_debug(parent, "attached %s context\n", subdev->name); - return 0; -} - -int -nvkm_engctx_fini(struct nvkm_engctx *engctx, bool suspend) -{ - struct nvkm_object *object = nv_object(engctx); - struct nvkm_subdev *subdev = nv_subdev(object->engine); - struct nvkm_object *parent; - struct nvkm_subdev *pardev; - int ret = 0; - - parent = nv_pclass(object->parent, NV_PARENT_CLASS); - pardev = nv_subdev(parent->engine); - if (nv_parent(parent)->context_detach) { - mutex_lock(&pardev->mutex); - ret = nv_parent(parent)->context_detach(parent, suspend, object); - mutex_unlock(&pardev->mutex); - } - - if (ret) { - nv_error(parent, "failed to detach %s context, %d\n", - subdev->name, ret); - return ret; - } - - nv_debug(parent, "detached %s context\n", subdev->name); - return nvkm_gpuobj_fini(&engctx->gpuobj, suspend); -} - -int -_nvkm_engctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine, - struct nvkm_oclass *oclass, void *data, u32 size, - struct nvkm_object **pobject) -{ - struct nvkm_engctx *engctx; - int ret; - - ret = nvkm_engctx_create(parent, engine, oclass, NULL, 256, 256, - NVOBJ_FLAG_ZERO_ALLOC, &engctx); - *pobject = nv_object(engctx); - return ret; -} - -void -_nvkm_engctx_dtor(struct nvkm_object *object) -{ - nvkm_engctx_destroy(nv_engctx(object)); -} - -int -_nvkm_engctx_init(struct nvkm_object *object) -{ - return nvkm_engctx_init(nv_engctx(object)); -} - -int -_nvkm_engctx_fini(struct nvkm_object *object, bool suspend) -{ - return nvkm_engctx_fini(nv_engctx(object), suspend); -} - -struct nvkm_object * -nvkm_engctx_get(struct nvkm_engine *engine, u64 addr) -{ - struct nvkm_engctx *engctx; - unsigned long flags; - - spin_lock_irqsave(&engine->lock, flags); - list_for_each_entry(engctx, &engine->contexts, head) { - if (engctx->addr == addr) { - engctx->save = flags; - return nv_object(engctx); - } - } - 
spin_unlock_irqrestore(&engine->lock, flags); - return NULL; -} - -void -nvkm_engctx_put(struct nvkm_object *object) -{ - if (object) { - struct nvkm_engine *engine = nv_engine(object->engine); - struct nvkm_engctx *engctx = nv_engctx(object); - spin_unlock_irqrestore(&engine->lock, engctx->save); - } -} diff --git a/kernel/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/kernel/drivers/gpu/drm/nouveau/nvkm/core/engine.c index 60820173c..8a7bae7bd 100644 --- a/kernel/drivers/gpu/drm/nouveau/nvkm/core/engine.c +++ b/kernel/drivers/gpu/drm/nouveau/nvkm/core/engine.c @@ -25,51 +25,141 @@ #include #include +#include + +void +nvkm_engine_unref(struct nvkm_engine **pengine) +{ + struct nvkm_engine *engine = *pengine; + if (engine) { + mutex_lock(&engine->subdev.mutex); + if (--engine->usecount == 0) + nvkm_subdev_fini(&engine->subdev, false); + mutex_unlock(&engine->subdev.mutex); + *pengine = NULL; + } +} + struct nvkm_engine * -nvkm_engine(void *obj, int idx) +nvkm_engine_ref(struct nvkm_engine *engine) { - obj = nvkm_subdev(obj, idx); - if (obj && nv_iclass(obj, NV_ENGINE_CLASS)) - return nv_engine(obj); - return NULL; + if (engine) { + mutex_lock(&engine->subdev.mutex); + if (++engine->usecount == 1) { + int ret = nvkm_subdev_init(&engine->subdev); + if (ret) { + engine->usecount--; + mutex_unlock(&engine->subdev.mutex); + return ERR_PTR(ret); + } + } + mutex_unlock(&engine->subdev.mutex); + } + return engine; } -int -nvkm_engine_create_(struct nvkm_object *parent, struct nvkm_object *engobj, - struct nvkm_oclass *oclass, bool enable, - const char *iname, const char *fname, - int length, void **pobject) +void +nvkm_engine_tile(struct nvkm_engine *engine, int region) { - struct nvkm_engine *engine; - int ret; + struct nvkm_fb *fb = engine->subdev.device->fb; + if (engine->func->tile) + engine->func->tile(engine, region, &fb->tile.region[region]); +} - ret = nvkm_subdev_create_(parent, engobj, oclass, NV_ENGINE_CLASS, - iname, fname, length, pobject); - engine = *pobject; - if (ret) - return ret; +static void +nvkm_engine_intr(struct nvkm_subdev *subdev) +{ + struct nvkm_engine *engine = nvkm_engine(subdev); + if (engine->func->intr) + engine->func->intr(engine); +} - if (parent) { - struct nvkm_device *device = nv_device(parent); - int engidx = nv_engidx(engine); +static int +nvkm_engine_fini(struct nvkm_subdev *subdev, bool suspend) +{ + struct nvkm_engine *engine = nvkm_engine(subdev); + if (engine->func->fini) + return engine->func->fini(engine, suspend); + return 0; +} - if (device->disable_mask & (1ULL << engidx)) { - if (!nvkm_boolopt(device->cfgopt, iname, false)) { - nv_debug(engine, "engine disabled by hw/fw\n"); - return -ENODEV; - } +static int +nvkm_engine_init(struct nvkm_subdev *subdev) +{ + struct nvkm_engine *engine = nvkm_engine(subdev); + struct nvkm_fb *fb = subdev->device->fb; + int ret = 0, i; + s64 time; - nv_warn(engine, "ignoring hw/fw engine disable\n"); - } + if (!engine->usecount) { + nvkm_trace(subdev, "init skipped, engine has no users\n"); + return ret; + } - if (!nvkm_boolopt(device->cfgopt, iname, enable)) { - if (!enable) - nv_warn(engine, "disabled, %s=1 to enable\n", iname); - return -ENODEV; + if (engine->func->oneinit && !engine->subdev.oneinit) { + nvkm_trace(subdev, "one-time init running...\n"); + time = ktime_to_us(ktime_get()); + ret = engine->func->oneinit(engine); + if (ret) { + nvkm_trace(subdev, "one-time init failed, %d\n", ret); + return ret; } + + engine->subdev.oneinit = true; + time = ktime_to_us(ktime_get()) - time; + nvkm_trace(subdev, 
"one-time init completed in %lldus\n", time); + } + + if (engine->func->init) + ret = engine->func->init(engine); + + for (i = 0; fb && i < fb->tile.regions; i++) + nvkm_engine_tile(engine, i); + return ret; +} + +static void * +nvkm_engine_dtor(struct nvkm_subdev *subdev) +{ + struct nvkm_engine *engine = nvkm_engine(subdev); + if (engine->func->dtor) + return engine->func->dtor(engine); + return engine; +} + +static const struct nvkm_subdev_func +nvkm_engine_func = { + .dtor = nvkm_engine_dtor, + .init = nvkm_engine_init, + .fini = nvkm_engine_fini, + .intr = nvkm_engine_intr, +}; + +int +nvkm_engine_ctor(const struct nvkm_engine_func *func, + struct nvkm_device *device, int index, u32 pmc_enable, + bool enable, struct nvkm_engine *engine) +{ + nvkm_subdev_ctor(&nvkm_engine_func, device, index, + pmc_enable, &engine->subdev); + engine->func = func; + + if (!nvkm_boolopt(device->cfgopt, nvkm_subdev_name[index], enable)) { + nvkm_debug(&engine->subdev, "disabled\n"); + return -ENODEV; } - INIT_LIST_HEAD(&engine->contexts); spin_lock_init(&engine->lock); return 0; } + +int +nvkm_engine_new_(const struct nvkm_engine_func *func, + struct nvkm_device *device, int index, u32 pmc_enable, + bool enable, struct nvkm_engine **pengine) +{ + if (!(*pengine = kzalloc(sizeof(**pengine), GFP_KERNEL))) + return -ENOMEM; + return nvkm_engine_ctor(func, device, index, pmc_enable, + enable, *pengine); +} diff --git a/kernel/drivers/gpu/drm/nouveau/nvkm/core/enum.c b/kernel/drivers/gpu/drm/nouveau/nvkm/core/enum.c index 4f92bfc13..b9581feb2 100644 --- a/kernel/drivers/gpu/drm/nouveau/nvkm/core/enum.c +++ b/kernel/drivers/gpu/drm/nouveau/nvkm/core/enum.c @@ -38,29 +38,19 @@ nvkm_enum_find(const struct nvkm_enum *en, u32 value) return NULL; } -const struct nvkm_enum * -nvkm_enum_print(const struct nvkm_enum *en, u32 value) -{ - en = nvkm_enum_find(en, value); - if (en) - pr_cont("%s", en->name); - else - pr_cont("(unknown enum 0x%08x)", value); - return en; -} - void -nvkm_bitfield_print(const struct nvkm_bitfield *bf, u32 value) +nvkm_snprintbf(char *data, int size, const struct nvkm_bitfield *bf, u32 value) { - while (bf->name) { + bool space = false; + while (size >= 1 && bf->name) { if (value & bf->mask) { - pr_cont(" %s", bf->name); - value &= ~bf->mask; + int this = snprintf(data, size, "%s%s", + space ? 
" " : "", bf->name); + size -= this; + data += this; + space = true; } - bf++; } - - if (value) - pr_cont(" (unknown bits 0x%08x)", value); + data[0] = '\0'; } diff --git a/kernel/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c b/kernel/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c index 2eba801aa..c3a790eb8 100644 --- a/kernel/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c +++ b/kernel/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c @@ -28,240 +28,205 @@ #include #include -void -nvkm_gpuobj_destroy(struct nvkm_gpuobj *gpuobj) +/* fast-path, where backend is able to provide direct pointer to memory */ +static u32 +nvkm_gpuobj_rd32_fast(struct nvkm_gpuobj *gpuobj, u32 offset) { - int i; - - if (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE) { - for (i = 0; i < gpuobj->size; i += 4) - nv_wo32(gpuobj, i, 0x00000000); - } - - if (gpuobj->node) - nvkm_mm_free(&nv_gpuobj(gpuobj->parent)->heap, &gpuobj->node); + return ioread32_native(gpuobj->map + offset); +} - if (gpuobj->heap.block_size) - nvkm_mm_fini(&gpuobj->heap); +static void +nvkm_gpuobj_wr32_fast(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data) +{ + iowrite32_native(data, gpuobj->map + offset); +} - nvkm_object_destroy(&gpuobj->object); +/* accessor functions for gpuobjs allocated directly from instmem */ +static u32 +nvkm_gpuobj_heap_rd32(struct nvkm_gpuobj *gpuobj, u32 offset) +{ + return nvkm_ro32(gpuobj->memory, offset); } -int -nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine, - struct nvkm_oclass *oclass, u32 pclass, - struct nvkm_object *pargpu, u32 size, u32 align, u32 flags, - int length, void **pobject) +static void +nvkm_gpuobj_heap_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data) { - struct nvkm_instmem *imem = nvkm_instmem(parent); - struct nvkm_bar *bar = nvkm_bar(parent); - struct nvkm_gpuobj *gpuobj; - struct nvkm_mm *heap = NULL; - int ret, i; - u64 addr; + nvkm_wo32(gpuobj->memory, offset, data); +} - *pobject = NULL; +static const struct nvkm_gpuobj_func nvkm_gpuobj_heap; +static void +nvkm_gpuobj_heap_release(struct nvkm_gpuobj *gpuobj) +{ + gpuobj->func = &nvkm_gpuobj_heap; + nvkm_done(gpuobj->memory); +} - if (pargpu) { - while ((pargpu = nv_pclass(pargpu, NV_GPUOBJ_CLASS))) { - if (nv_gpuobj(pargpu)->heap.block_size) - break; - pargpu = pargpu->parent; - } +static const struct nvkm_gpuobj_func +nvkm_gpuobj_heap_fast = { + .release = nvkm_gpuobj_heap_release, + .rd32 = nvkm_gpuobj_rd32_fast, + .wr32 = nvkm_gpuobj_wr32_fast, +}; - if (unlikely(pargpu == NULL)) { - nv_error(parent, "no gpuobj heap\n"); - return -EINVAL; - } +static const struct nvkm_gpuobj_func +nvkm_gpuobj_heap_slow = { + .release = nvkm_gpuobj_heap_release, + .rd32 = nvkm_gpuobj_heap_rd32, + .wr32 = nvkm_gpuobj_heap_wr32, +}; - addr = nv_gpuobj(pargpu)->addr; - heap = &nv_gpuobj(pargpu)->heap; - atomic_inc(&parent->refcount); - } else { - ret = imem->alloc(imem, parent, size, align, &parent); - pargpu = parent; - if (ret) - return ret; +static void * +nvkm_gpuobj_heap_acquire(struct nvkm_gpuobj *gpuobj) +{ + gpuobj->map = nvkm_kmap(gpuobj->memory); + if (likely(gpuobj->map)) + gpuobj->func = &nvkm_gpuobj_heap_fast; + else + gpuobj->func = &nvkm_gpuobj_heap_slow; + return gpuobj->map; +} - addr = nv_memobj(pargpu)->addr; - size = nv_memobj(pargpu)->size; - - if (bar && bar->alloc) { - struct nvkm_instobj *iobj = (void *)parent; - struct nvkm_mem **mem = (void *)(iobj + 1); - struct nvkm_mem *node = *mem; - if (!bar->alloc(bar, parent, node, &pargpu)) { - nvkm_object_ref(NULL, &parent); - parent = pargpu; - } - } - } +static const struct 
nvkm_gpuobj_func +nvkm_gpuobj_heap = { + .acquire = nvkm_gpuobj_heap_acquire, +}; - ret = nvkm_object_create_(parent, engine, oclass, pclass | - NV_GPUOBJ_CLASS, length, pobject); - nvkm_object_ref(NULL, &parent); - gpuobj = *pobject; - if (ret) - return ret; +/* accessor functions for gpuobjs sub-allocated from a parent gpuobj */ +static u32 +nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset) +{ + return nvkm_ro32(gpuobj->parent, gpuobj->node->offset + offset); +} - gpuobj->parent = pargpu; - gpuobj->flags = flags; - gpuobj->addr = addr; - gpuobj->size = size; +static void +nvkm_gpuobj_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data) +{ + nvkm_wo32(gpuobj->parent, gpuobj->node->offset + offset, data); +} - if (heap) { - ret = nvkm_mm_head(heap, 0, 1, size, size, max(align, (u32)1), - &gpuobj->node); - if (ret) - return ret; +static const struct nvkm_gpuobj_func nvkm_gpuobj_func; +static void +nvkm_gpuobj_release(struct nvkm_gpuobj *gpuobj) +{ + gpuobj->func = &nvkm_gpuobj_func; + nvkm_done(gpuobj->parent); +} - gpuobj->addr += gpuobj->node->offset; - } +static const struct nvkm_gpuobj_func +nvkm_gpuobj_fast = { + .release = nvkm_gpuobj_release, + .rd32 = nvkm_gpuobj_rd32_fast, + .wr32 = nvkm_gpuobj_wr32_fast, +}; - if (gpuobj->flags & NVOBJ_FLAG_HEAP) { - ret = nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1); - if (ret) - return ret; - } +static const struct nvkm_gpuobj_func +nvkm_gpuobj_slow = { + .release = nvkm_gpuobj_release, + .rd32 = nvkm_gpuobj_rd32, + .wr32 = nvkm_gpuobj_wr32, +}; - if (flags & NVOBJ_FLAG_ZERO_ALLOC) { - for (i = 0; i < gpuobj->size; i += 4) - nv_wo32(gpuobj, i, 0x00000000); +static void * +nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj) +{ + gpuobj->map = nvkm_kmap(gpuobj->parent); + if (likely(gpuobj->map)) { + gpuobj->map = (u8 *)gpuobj->map + gpuobj->node->offset; + gpuobj->func = &nvkm_gpuobj_fast; + } else { + gpuobj->func = &nvkm_gpuobj_slow; } - - return ret; + return gpuobj->map; } -struct nvkm_gpuobj_class { - struct nvkm_object *pargpu; - u64 size; - u32 align; - u32 flags; +static const struct nvkm_gpuobj_func +nvkm_gpuobj_func = { + .acquire = nvkm_gpuobj_acquire, }; static int -_nvkm_gpuobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine, - struct nvkm_oclass *oclass, void *data, u32 size, - struct nvkm_object **pobject) +nvkm_gpuobj_ctor(struct nvkm_device *device, u32 size, int align, bool zero, + struct nvkm_gpuobj *parent, struct nvkm_gpuobj *gpuobj) { - struct nvkm_gpuobj_class *args = data; - struct nvkm_gpuobj *object; + u32 offset; int ret; - ret = nvkm_gpuobj_create(parent, engine, oclass, 0, args->pargpu, - args->size, args->align, args->flags, - &object); - *pobject = nv_object(object); - if (ret) - return ret; + if (parent) { + if (align >= 0) { + ret = nvkm_mm_head(&parent->heap, 0, 1, size, size, + max(align, 1), &gpuobj->node); + } else { + ret = nvkm_mm_tail(&parent->heap, 0, 1, size, size, + -align, &gpuobj->node); + } + if (ret) + return ret; - return 0; -} + gpuobj->parent = parent; + gpuobj->func = &nvkm_gpuobj_func; + gpuobj->addr = parent->addr + gpuobj->node->offset; + gpuobj->size = gpuobj->node->length; -void -_nvkm_gpuobj_dtor(struct nvkm_object *object) -{ - nvkm_gpuobj_destroy(nv_gpuobj(object)); -} - -int -_nvkm_gpuobj_init(struct nvkm_object *object) -{ - return nvkm_gpuobj_init(nv_gpuobj(object)); -} + if (zero) { + nvkm_kmap(gpuobj); + for (offset = 0; offset < gpuobj->size; offset += 4) + nvkm_wo32(gpuobj, offset, 0x00000000); + nvkm_done(gpuobj); + } + } else { + ret = nvkm_memory_new(device, 
NVKM_MEM_TARGET_INST, size, + abs(align), zero, &gpuobj->memory); + if (ret) + return ret; -int -_nvkm_gpuobj_fini(struct nvkm_object *object, bool suspend) -{ - return nvkm_gpuobj_fini(nv_gpuobj(object), suspend); -} + gpuobj->func = &nvkm_gpuobj_heap; + gpuobj->addr = nvkm_memory_addr(gpuobj->memory); + gpuobj->size = nvkm_memory_size(gpuobj->memory); + } -u32 -_nvkm_gpuobj_rd32(struct nvkm_object *object, u64 addr) -{ - struct nvkm_gpuobj *gpuobj = nv_gpuobj(object); - struct nvkm_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent); - if (gpuobj->node) - addr += gpuobj->node->offset; - return pfuncs->rd32(gpuobj->parent, addr); + return nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1); } void -_nvkm_gpuobj_wr32(struct nvkm_object *object, u64 addr, u32 data) +nvkm_gpuobj_del(struct nvkm_gpuobj **pgpuobj) { - struct nvkm_gpuobj *gpuobj = nv_gpuobj(object); - struct nvkm_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent); - if (gpuobj->node) - addr += gpuobj->node->offset; - pfuncs->wr32(gpuobj->parent, addr, data); + struct nvkm_gpuobj *gpuobj = *pgpuobj; + if (gpuobj) { + if (gpuobj->parent) + nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node); + nvkm_mm_fini(&gpuobj->heap); + nvkm_memory_del(&gpuobj->memory); + kfree(*pgpuobj); + *pgpuobj = NULL; + } } -static struct nvkm_oclass -_nvkm_gpuobj_oclass = { - .handle = 0x00000000, - .ofuncs = &(struct nvkm_ofuncs) { - .ctor = _nvkm_gpuobj_ctor, - .dtor = _nvkm_gpuobj_dtor, - .init = _nvkm_gpuobj_init, - .fini = _nvkm_gpuobj_fini, - .rd32 = _nvkm_gpuobj_rd32, - .wr32 = _nvkm_gpuobj_wr32, - }, -}; - int -nvkm_gpuobj_new(struct nvkm_object *parent, struct nvkm_object *pargpu, - u32 size, u32 align, u32 flags, - struct nvkm_gpuobj **pgpuobj) +nvkm_gpuobj_new(struct nvkm_device *device, u32 size, int align, bool zero, + struct nvkm_gpuobj *parent, struct nvkm_gpuobj **pgpuobj) { - struct nvkm_object *engine = parent; - struct nvkm_gpuobj_class args = { - .pargpu = pargpu, - .size = size, - .align = align, - .flags = flags, - }; - - if (!nv_iclass(engine, NV_SUBDEV_CLASS)) - engine = &engine->engine->subdev.object; - BUG_ON(engine == NULL); - - return nvkm_object_ctor(parent, engine, &_nvkm_gpuobj_oclass, - &args, sizeof(args), - (struct nvkm_object **)pgpuobj); -} + struct nvkm_gpuobj *gpuobj; + int ret; -int -nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u32 access, struct nvkm_vma *vma) -{ - struct nvkm_bar *bar = nvkm_bar(gpuobj); - int ret = -EINVAL; - - if (bar && bar->umap) { - struct nvkm_instobj *iobj = (void *) - nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS); - struct nvkm_mem **mem = (void *)(iobj + 1); - ret = bar->umap(bar, *mem, access, vma); - } + if (!(gpuobj = *pgpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL))) + return -ENOMEM; + ret = nvkm_gpuobj_ctor(device, size, align, zero, parent, gpuobj); + if (ret) + nvkm_gpuobj_del(pgpuobj); return ret; } int -nvkm_gpuobj_map_vm(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm, - u32 access, struct nvkm_vma *vma) +nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm, + u32 access, struct nvkm_vma *vma) { - struct nvkm_instobj *iobj = (void *) - nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS); - struct nvkm_mem **mem = (void *)(iobj + 1); - int ret; - - ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma); - if (ret) - return ret; - - nvkm_vm_map(vma, *mem); - return 0; + struct nvkm_memory *memory = gpuobj->memory; + int ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma); + if (ret == 0) + nvkm_memory_map(memory, vma, 0); + return ret; } void @@ -278,39 +243,13 @@ nvkm_gpuobj_unmap(struct nvkm_vma 
*vma) * anywhere else. */ -static void -nvkm_gpudup_dtor(struct nvkm_object *object) -{ - struct nvkm_gpuobj *gpuobj = (void *)object; - nvkm_object_ref(NULL, &gpuobj->parent); - nvkm_object_destroy(&gpuobj->object); -} - -static struct nvkm_oclass -nvkm_gpudup_oclass = { - .handle = NV_GPUOBJ_CLASS, - .ofuncs = &(struct nvkm_ofuncs) { - .dtor = nvkm_gpudup_dtor, - .init = nvkm_object_init, - .fini = nvkm_object_fini, - }, -}; - int -nvkm_gpuobj_dup(struct nvkm_object *parent, struct nvkm_gpuobj *base, - struct nvkm_gpuobj **pgpuobj) +nvkm_gpuobj_wrap(struct nvkm_memory *memory, struct nvkm_gpuobj **pgpuobj) { - struct nvkm_gpuobj *gpuobj; - int ret; - - ret = nvkm_object_create(parent, &parent->engine->subdev.object, - &nvkm_gpudup_oclass, 0, &gpuobj); - *pgpuobj = gpuobj; - if (ret) - return ret; + if (!(*pgpuobj = kzalloc(sizeof(**pgpuobj), GFP_KERNEL))) + return -ENOMEM; - nvkm_object_ref(nv_object(base), &gpuobj->parent); - gpuobj->addr = base->addr; - gpuobj->size = base->size; + (*pgpuobj)->addr = nvkm_memory_addr(memory); + (*pgpuobj)->size = nvkm_memory_size(memory); return 0; } diff --git a/kernel/drivers/gpu/drm/nouveau/nvkm/core/handle.c b/kernel/drivers/gpu/drm/nouveau/nvkm/core/handle.c deleted file mode 100644 index dc7ff10eb..000000000 --- a/kernel/drivers/gpu/drm/nouveau/nvkm/core/handle.c +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ -#include -#include - -#define hprintk(h,l,f,a...) do { \ - struct nvkm_client *c = nvkm_client((h)->object); \ - struct nvkm_handle *p = (h)->parent; u32 n = p ? 
p->name : ~0; \ - nv_printk((c), l, "0x%08x:0x%08x "f, n, (h)->name, ##a); \ -} while(0) - -int -nvkm_handle_init(struct nvkm_handle *handle) -{ - struct nvkm_handle *item; - int ret; - - hprintk(handle, TRACE, "init running\n"); - ret = nvkm_object_inc(handle->object); - if (ret) - return ret; - - hprintk(handle, TRACE, "init children\n"); - list_for_each_entry(item, &handle->tree, head) { - ret = nvkm_handle_init(item); - if (ret) - goto fail; - } - - hprintk(handle, TRACE, "init completed\n"); - return 0; -fail: - hprintk(handle, ERROR, "init failed with %d\n", ret); - list_for_each_entry_continue_reverse(item, &handle->tree, head) { - nvkm_handle_fini(item, false); - } - - nvkm_object_dec(handle->object, false); - return ret; -} - -int -nvkm_handle_fini(struct nvkm_handle *handle, bool suspend) -{ - static char *name[2] = { "fini", "suspend" }; - struct nvkm_handle *item; - int ret; - - hprintk(handle, TRACE, "%s children\n", name[suspend]); - list_for_each_entry(item, &handle->tree, head) { - ret = nvkm_handle_fini(item, suspend); - if (ret && suspend) - goto fail; - } - - hprintk(handle, TRACE, "%s running\n", name[suspend]); - if (handle->object) { - ret = nvkm_object_dec(handle->object, suspend); - if (ret && suspend) - goto fail; - } - - hprintk(handle, TRACE, "%s completed\n", name[suspend]); - return 0; -fail: - hprintk(handle, ERROR, "%s failed with %d\n", name[suspend], ret); - list_for_each_entry_continue_reverse(item, &handle->tree, head) { - int rret = nvkm_handle_init(item); - if (rret) - hprintk(handle, FATAL, "failed to restart, %d\n", rret); - } - - return ret; -} - -int -nvkm_handle_create(struct nvkm_object *parent, u32 _parent, u32 _handle, - struct nvkm_object *object, struct nvkm_handle **phandle) -{ - struct nvkm_object *namedb; - struct nvkm_handle *handle; - int ret; - - namedb = parent; - while (!nv_iclass(namedb, NV_NAMEDB_CLASS)) - namedb = namedb->parent; - - handle = kzalloc(sizeof(*handle), GFP_KERNEL); - if (!handle) - return -ENOMEM; - - INIT_LIST_HEAD(&handle->head); - INIT_LIST_HEAD(&handle->tree); - handle->name = _handle; - handle->priv = ~0; - - ret = nvkm_namedb_insert(nv_namedb(namedb), _handle, object, handle); - if (ret) { - kfree(handle); - return ret; - } - - if (nv_parent(parent)->object_attach) { - ret = nv_parent(parent)->object_attach(parent, object, _handle); - if (ret < 0) { - nvkm_handle_destroy(handle); - return ret; - } - - handle->priv = ret; - } - - if (object != namedb) { - while (!nv_iclass(namedb, NV_CLIENT_CLASS)) - namedb = namedb->parent; - - handle->parent = nvkm_namedb_get(nv_namedb(namedb), _parent); - if (handle->parent) { - list_add(&handle->head, &handle->parent->tree); - nvkm_namedb_put(handle->parent); - } - } - - hprintk(handle, TRACE, "created\n"); - *phandle = handle; - return 0; -} - -void -nvkm_handle_destroy(struct nvkm_handle *handle) -{ - struct nvkm_handle *item, *temp; - - hprintk(handle, TRACE, "destroy running\n"); - list_for_each_entry_safe(item, temp, &handle->tree, head) { - nvkm_handle_destroy(item); - } - list_del(&handle->head); - - if (handle->priv != ~0) { - struct nvkm_object *parent = handle->parent->object; - nv_parent(parent)->object_detach(parent, handle->priv); - } - - hprintk(handle, TRACE, "destroy completed\n"); - nvkm_namedb_remove(handle); - kfree(handle); -} - -struct nvkm_object * -nvkm_handle_ref(struct nvkm_object *parent, u32 name) -{ - struct nvkm_object *object = NULL; - struct nvkm_handle *handle; - - while (!nv_iclass(parent, NV_NAMEDB_CLASS)) - parent = parent->parent; - - 
handle = nvkm_namedb_get(nv_namedb(parent), name); - if (handle) { - nvkm_object_ref(handle->object, &object); - nvkm_namedb_put(handle); - } - - return object; -} - -struct nvkm_handle * -nvkm_handle_get_class(struct nvkm_object *engctx, u16 oclass) -{ - struct nvkm_namedb *namedb; - if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS))) - return nvkm_namedb_get_class(namedb, oclass); - return NULL; -} - -struct nvkm_handle * -nvkm_handle_get_vinst(struct nvkm_object *engctx, u64 vinst) -{ - struct nvkm_namedb *namedb; - if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS))) - return nvkm_namedb_get_vinst(namedb, vinst); - return NULL; -} - -struct nvkm_handle * -nvkm_handle_get_cinst(struct nvkm_object *engctx, u32 cinst) -{ - struct nvkm_namedb *namedb; - if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS))) - return nvkm_namedb_get_cinst(namedb, cinst); - return NULL; -} - -void -nvkm_handle_put(struct nvkm_handle *handle) -{ - if (handle) - nvkm_namedb_put(handle); -} diff --git a/kernel/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/kernel/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c index 4459ff5f4..d87d6ab03 100644 --- a/kernel/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c +++ b/kernel/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c @@ -24,196 +24,154 @@ #include #include #include -#include -#include #include #include static int -nvkm_ioctl_nop(struct nvkm_handle *handle, void *data, u32 size) +nvkm_ioctl_nop(struct nvkm_object *object, void *data, u32 size) { - struct nvkm_object *object = handle->object; union { - struct nvif_ioctl_nop none; + struct nvif_ioctl_nop_v0 v0; } *args = data; int ret; - nv_ioctl(object, "nop size %d\n", size); - if (nvif_unvers(args->none)) { - nv_ioctl(object, "nop\n"); + nvif_ioctl(object, "nop size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nvif_ioctl(object, "nop vers %lld\n", args->v0.version); + args->v0.version = NVIF_VERSION_LATEST; } return ret; } static int -nvkm_ioctl_sclass(struct nvkm_handle *handle, void *data, u32 size) +nvkm_ioctl_sclass(struct nvkm_object *object, void *data, u32 size) { - struct nvkm_object *object = handle->object; union { struct nvif_ioctl_sclass_v0 v0; } *args = data; - int ret; + struct nvkm_oclass oclass; + int ret, i = 0; - if (!nv_iclass(object, NV_PARENT_CLASS)) { - nv_debug(object, "cannot have children (sclass)\n"); - return -ENODEV; - } - - nv_ioctl(object, "sclass size %d\n", size); + nvif_ioctl(object, "sclass size %d\n", size); if (nvif_unpack(args->v0, 0, 0, true)) { - nv_ioctl(object, "sclass vers %d count %d\n", - args->v0.version, args->v0.count); - if (size == args->v0.count * sizeof(args->v0.oclass[0])) { - ret = nvkm_parent_lclass(object, args->v0.oclass, - args->v0.count); - if (ret >= 0) { - args->v0.count = ret; - ret = 0; + nvif_ioctl(object, "sclass vers %d count %d\n", + args->v0.version, args->v0.count); + if (size != args->v0.count * sizeof(args->v0.oclass[0])) + return -EINVAL; + + while (object->func->sclass && + object->func->sclass(object, i, &oclass) >= 0) { + if (i < args->v0.count) { + args->v0.oclass[i].oclass = oclass.base.oclass; + args->v0.oclass[i].minver = oclass.base.minver; + args->v0.oclass[i].maxver = oclass.base.maxver; } - } else { - ret = -EINVAL; + i++; } + + args->v0.count = i; } return ret; } static int -nvkm_ioctl_new(struct nvkm_handle *handle, void *data, u32 size) +nvkm_ioctl_new(struct nvkm_object *parent, void *data, u32 size) { union { struct nvif_ioctl_new_v0 v0; } *args = data; - struct nvkm_client *client = 
nvkm_client(handle->object); - struct nvkm_object *engctx = NULL; + struct nvkm_client *client = parent->client; struct nvkm_object *object = NULL; - struct nvkm_parent *parent; - struct nvkm_object *engine; - struct nvkm_oclass *oclass; - u32 _handle, _oclass; - int ret; + struct nvkm_oclass oclass; + int ret, i = 0; - nv_ioctl(client, "new size %d\n", size); + nvif_ioctl(parent, "new size %d\n", size); if (nvif_unpack(args->v0, 0, 0, true)) { - _handle = args->v0.handle; - _oclass = args->v0.oclass; + nvif_ioctl(parent, "new vers %d handle %08x class %08x " + "route %02x token %llx object %016llx\n", + args->v0.version, args->v0.handle, args->v0.oclass, + args->v0.route, args->v0.token, args->v0.object); } else return ret; - nv_ioctl(client, "new vers %d handle %08x class %08x " - "route %02x token %llx\n", - args->v0.version, _handle, _oclass, - args->v0.route, args->v0.token); - - if (!nv_iclass(handle->object, NV_PARENT_CLASS)) { - nv_debug(handle->object, "cannot have children (ctor)\n"); - ret = -ENODEV; - goto fail_class; + if (!parent->func->sclass) { + nvif_ioctl(parent, "cannot have children\n"); + return -EINVAL; } - parent = nv_parent(handle->object); - - /* check that parent supports the requested subclass */ - ret = nvkm_parent_sclass(&parent->object, _oclass, &engine, &oclass); - if (ret) { - nv_debug(parent, "illegal class 0x%04x\n", _oclass); - goto fail_class; - } - - /* make sure engine init has been completed *before* any objects - * it controls are created - the constructors may depend on - * state calculated at init (ie. default context construction) - */ - if (engine) { - ret = nvkm_object_inc(engine); + do { + memset(&oclass, 0x00, sizeof(oclass)); + oclass.client = client; + oclass.handle = args->v0.handle; + oclass.object = args->v0.object; + oclass.parent = parent; + ret = parent->func->sclass(parent, i++, &oclass); if (ret) - goto fail_class; + return ret; + } while (oclass.base.oclass != args->v0.oclass); + + if (oclass.engine) { + oclass.engine = nvkm_engine_ref(oclass.engine); + if (IS_ERR(oclass.engine)) + return PTR_ERR(oclass.engine); } - /* if engine requires it, create a context object to insert - * between the parent and its children (eg. 
PGRAPH context) - */ - if (engine && nv_engine(engine)->cclass) { - ret = nvkm_object_ctor(&parent->object, engine, - nv_engine(engine)->cclass, - data, size, &engctx); - if (ret) - goto fail_engctx; - } else { - nvkm_object_ref(&parent->object, &engctx); + ret = oclass.ctor(&oclass, data, size, &object); + nvkm_engine_unref(&oclass.engine); + if (ret == 0) { + ret = nvkm_object_init(object); + if (ret == 0) { + list_add(&object->head, &parent->tree); + object->route = args->v0.route; + object->token = args->v0.token; + object->object = args->v0.object; + if (nvkm_client_insert(client, object)) { + client->data = object; + return 0; + } + ret = -EEXIST; + } + nvkm_object_fini(object, false); } - /* finally, create new object and bind it to its handle */ - ret = nvkm_object_ctor(engctx, engine, oclass, data, size, &object); - client->data = object; - if (ret) - goto fail_ctor; - - ret = nvkm_object_inc(object); - if (ret) - goto fail_init; - - ret = nvkm_handle_create(&parent->object, handle->name, - _handle, object, &handle); - if (ret) - goto fail_handle; - - ret = nvkm_handle_init(handle); - handle->route = args->v0.route; - handle->token = args->v0.token; - if (ret) - nvkm_handle_destroy(handle); - -fail_handle: - nvkm_object_dec(object, false); -fail_init: - nvkm_object_ref(NULL, &object); -fail_ctor: - nvkm_object_ref(NULL, &engctx); -fail_engctx: - if (engine) - nvkm_object_dec(engine, false); -fail_class: + nvkm_object_del(&object); return ret; } static int -nvkm_ioctl_del(struct nvkm_handle *handle, void *data, u32 size) +nvkm_ioctl_del(struct nvkm_object *object, void *data, u32 size) { - struct nvkm_object *object = handle->object; union { struct nvif_ioctl_del none; } *args = data; int ret; - nv_ioctl(object, "delete size %d\n", size); + nvif_ioctl(object, "delete size %d\n", size); if (nvif_unvers(args->none)) { - nv_ioctl(object, "delete\n"); - nvkm_handle_fini(handle, false); - nvkm_handle_destroy(handle); + nvif_ioctl(object, "delete\n"); + nvkm_object_fini(object, false); + nvkm_object_del(&object); } return ret; } static int -nvkm_ioctl_mthd(struct nvkm_handle *handle, void *data, u32 size) +nvkm_ioctl_mthd(struct nvkm_object *object, void *data, u32 size) { - struct nvkm_object *object = handle->object; - struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs; union { struct nvif_ioctl_mthd_v0 v0; } *args = data; int ret; - nv_ioctl(object, "mthd size %d\n", size); + nvif_ioctl(object, "mthd size %d\n", size); if (nvif_unpack(args->v0, 0, 0, true)) { - nv_ioctl(object, "mthd vers %d mthd %02x\n", - args->v0.version, args->v0.method); - if (ret = -ENODEV, ofuncs->mthd) - ret = ofuncs->mthd(object, args->v0.method, data, size); + nvif_ioctl(object, "mthd vers %d mthd %02x\n", + args->v0.version, args->v0.method); + ret = nvkm_object_mthd(object, args->v0.method, data, size); } return ret; @@ -221,37 +179,34 @@ nvkm_ioctl_mthd(struct nvkm_handle *handle, void *data, u32 size) static int -nvkm_ioctl_rd(struct nvkm_handle *handle, void *data, u32 size) +nvkm_ioctl_rd(struct nvkm_object *object, void *data, u32 size) { - struct nvkm_object *object = handle->object; - struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs; union { struct nvif_ioctl_rd_v0 v0; } *args = data; + union { + u8 b08; + u16 b16; + u32 b32; + } v; int ret; - nv_ioctl(object, "rd size %d\n", size); + nvif_ioctl(object, "rd size %d\n", size); if (nvif_unpack(args->v0, 0, 0, false)) { - nv_ioctl(object, "rd vers %d size %d addr %016llx\n", - args->v0.version, args->v0.size, args->v0.addr); + nvif_ioctl(object, 
"rd vers %d size %d addr %016llx\n", + args->v0.version, args->v0.size, args->v0.addr); switch (args->v0.size) { case 1: - if (ret = -ENODEV, ofuncs->rd08) { - args->v0.data = nv_ro08(object, args->v0.addr); - ret = 0; - } + ret = nvkm_object_rd08(object, args->v0.addr, &v.b08); + args->v0.data = v.b08; break; case 2: - if (ret = -ENODEV, ofuncs->rd16) { - args->v0.data = nv_ro16(object, args->v0.addr); - ret = 0; - } + ret = nvkm_object_rd16(object, args->v0.addr, &v.b16); + args->v0.data = v.b16; break; case 4: - if (ret = -ENODEV, ofuncs->rd32) { - args->v0.data = nv_ro32(object, args->v0.addr); - ret = 0; - } + ret = nvkm_object_rd32(object, args->v0.addr, &v.b32); + args->v0.data = v.b32; break; default: ret = -EINVAL; @@ -263,104 +218,81 @@ nvkm_ioctl_rd(struct nvkm_handle *handle, void *data, u32 size) } static int -nvkm_ioctl_wr(struct nvkm_handle *handle, void *data, u32 size) +nvkm_ioctl_wr(struct nvkm_object *object, void *data, u32 size) { - struct nvkm_object *object = handle->object; - struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs; union { struct nvif_ioctl_wr_v0 v0; } *args = data; int ret; - nv_ioctl(object, "wr size %d\n", size); + nvif_ioctl(object, "wr size %d\n", size); if (nvif_unpack(args->v0, 0, 0, false)) { - nv_ioctl(object, "wr vers %d size %d addr %016llx data %08x\n", - args->v0.version, args->v0.size, args->v0.addr, - args->v0.data); - switch (args->v0.size) { - case 1: - if (ret = -ENODEV, ofuncs->wr08) { - nv_wo08(object, args->v0.addr, args->v0.data); - ret = 0; - } - break; - case 2: - if (ret = -ENODEV, ofuncs->wr16) { - nv_wo16(object, args->v0.addr, args->v0.data); - ret = 0; - } - break; - case 4: - if (ret = -ENODEV, ofuncs->wr32) { - nv_wo32(object, args->v0.addr, args->v0.data); - ret = 0; - } - break; - default: - ret = -EINVAL; - break; - } + nvif_ioctl(object, + "wr vers %d size %d addr %016llx data %08x\n", + args->v0.version, args->v0.size, args->v0.addr, + args->v0.data); + } else + return ret; + + switch (args->v0.size) { + case 1: return nvkm_object_wr08(object, args->v0.addr, args->v0.data); + case 2: return nvkm_object_wr16(object, args->v0.addr, args->v0.data); + case 4: return nvkm_object_wr32(object, args->v0.addr, args->v0.data); + default: + break; } - return ret; + return -EINVAL; } static int -nvkm_ioctl_map(struct nvkm_handle *handle, void *data, u32 size) +nvkm_ioctl_map(struct nvkm_object *object, void *data, u32 size) { - struct nvkm_object *object = handle->object; - struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs; union { struct nvif_ioctl_map_v0 v0; } *args = data; int ret; - nv_ioctl(object, "map size %d\n", size); + nvif_ioctl(object, "map size %d\n", size); if (nvif_unpack(args->v0, 0, 0, false)) { - nv_ioctl(object, "map vers %d\n", args->v0.version); - if (ret = -ENODEV, ofuncs->map) { - ret = ofuncs->map(object, &args->v0.handle, - &args->v0.length); - } + nvif_ioctl(object, "map vers %d\n", args->v0.version); + ret = nvkm_object_map(object, &args->v0.handle, + &args->v0.length); } return ret; } static int -nvkm_ioctl_unmap(struct nvkm_handle *handle, void *data, u32 size) +nvkm_ioctl_unmap(struct nvkm_object *object, void *data, u32 size) { - struct nvkm_object *object = handle->object; union { struct nvif_ioctl_unmap none; } *args = data; int ret; - nv_ioctl(object, "unmap size %d\n", size); + nvif_ioctl(object, "unmap size %d\n", size); if (nvif_unvers(args->none)) { - nv_ioctl(object, "unmap\n"); + nvif_ioctl(object, "unmap\n"); } return ret; } static int -nvkm_ioctl_ntfy_new(struct nvkm_handle *handle, 
void *data, u32 size) +nvkm_ioctl_ntfy_new(struct nvkm_object *object, void *data, u32 size) { - struct nvkm_object *object = handle->object; - struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs; union { struct nvif_ioctl_ntfy_new_v0 v0; } *args = data; struct nvkm_event *event; int ret; - nv_ioctl(object, "ntfy new size %d\n", size); + nvif_ioctl(object, "ntfy new size %d\n", size); if (nvif_unpack(args->v0, 0, 0, true)) { - nv_ioctl(object, "ntfy new vers %d event %02x\n", - args->v0.version, args->v0.event); - if (ret = -ENODEV, ofuncs->ntfy) - ret = ofuncs->ntfy(object, args->v0.event, &event); + nvif_ioctl(object, "ntfy new vers %d event %02x\n", + args->v0.version, args->v0.event); + ret = nvkm_object_ntfy(object, args->v0.event, &event); if (ret == 0) { ret = nvkm_client_notify_new(object, event, data, size); if (ret >= 0) { @@ -374,19 +306,18 @@ nvkm_ioctl_ntfy_new(struct nvkm_handle *handle, void *data, u32 size) } static int -nvkm_ioctl_ntfy_del(struct nvkm_handle *handle, void *data, u32 size) +nvkm_ioctl_ntfy_del(struct nvkm_object *object, void *data, u32 size) { - struct nvkm_client *client = nvkm_client(handle->object); - struct nvkm_object *object = handle->object; + struct nvkm_client *client = object->client; union { struct nvif_ioctl_ntfy_del_v0 v0; } *args = data; int ret; - nv_ioctl(object, "ntfy del size %d\n", size); + nvif_ioctl(object, "ntfy del size %d\n", size); if (nvif_unpack(args->v0, 0, 0, false)) { - nv_ioctl(object, "ntfy del vers %d index %d\n", - args->v0.version, args->v0.index); + nvif_ioctl(object, "ntfy del vers %d index %d\n", + args->v0.version, args->v0.index); ret = nvkm_client_notify_del(client, args->v0.index); } @@ -394,19 +325,18 @@ nvkm_ioctl_ntfy_del(struct nvkm_handle *handle, void *data, u32 size) } static int -nvkm_ioctl_ntfy_get(struct nvkm_handle *handle, void *data, u32 size) +nvkm_ioctl_ntfy_get(struct nvkm_object *object, void *data, u32 size) { - struct nvkm_client *client = nvkm_client(handle->object); - struct nvkm_object *object = handle->object; + struct nvkm_client *client = object->client; union { struct nvif_ioctl_ntfy_get_v0 v0; } *args = data; int ret; - nv_ioctl(object, "ntfy get size %d\n", size); + nvif_ioctl(object, "ntfy get size %d\n", size); if (nvif_unpack(args->v0, 0, 0, false)) { - nv_ioctl(object, "ntfy get vers %d index %d\n", - args->v0.version, args->v0.index); + nvif_ioctl(object, "ntfy get vers %d index %d\n", + args->v0.version, args->v0.index); ret = nvkm_client_notify_get(client, args->v0.index); } @@ -414,19 +344,18 @@ nvkm_ioctl_ntfy_get(struct nvkm_handle *handle, void *data, u32 size) } static int -nvkm_ioctl_ntfy_put(struct nvkm_handle *handle, void *data, u32 size) +nvkm_ioctl_ntfy_put(struct nvkm_object *object, void *data, u32 size) { - struct nvkm_client *client = nvkm_client(handle->object); - struct nvkm_object *object = handle->object; + struct nvkm_client *client = object->client; union { struct nvif_ioctl_ntfy_put_v0 v0; } *args = data; int ret; - nv_ioctl(object, "ntfy put size %d\n", size); + nvif_ioctl(object, "ntfy put size %d\n", size); if (nvif_unpack(args->v0, 0, 0, false)) { - nv_ioctl(object, "ntfy put vers %d index %d\n", - args->v0.version, args->v0.index); + nvif_ioctl(object, "ntfy put vers %d index %d\n", + args->v0.version, args->v0.index); ret = nvkm_client_notify_put(client, args->v0.index); } @@ -435,7 +364,7 @@ nvkm_ioctl_ntfy_put(struct nvkm_handle *handle, void *data, u32 size) static struct { int version; - int (*func)(struct nvkm_handle *, void *, u32); + int 
(*func)(struct nvkm_object *, void *, u32); } nvkm_ioctl_v0[] = { { 0x00, nvkm_ioctl_nop }, @@ -454,40 +383,31 @@ nvkm_ioctl_v0[] = { }; static int -nvkm_ioctl_path(struct nvkm_handle *parent, u32 type, u32 nr, u32 *path, +nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type, void *data, u32 size, u8 owner, u8 *route, u64 *token) { - struct nvkm_handle *handle = parent; - struct nvkm_namedb *namedb; struct nvkm_object *object; int ret; - while ((object = parent->object), nr--) { - nv_ioctl(object, "path 0x%08x\n", path[nr]); - if (!nv_iclass(object, NV_PARENT_CLASS)) { - nv_debug(object, "cannot have children (path)\n"); - return -EINVAL; - } - - if (!(namedb = (void *)nv_pclass(object, NV_NAMEDB_CLASS)) || - !(handle = nvkm_namedb_get(namedb, path[nr]))) { - nv_debug(object, "handle 0x%08x not found\n", path[nr]); - return -ENOENT; - } - nvkm_namedb_put(handle); - parent = handle; + if (handle) + object = nvkm_client_search(client, handle); + else + object = &client->object; + if (unlikely(!object)) { + nvif_ioctl(&client->object, "object not found\n"); + return -ENOENT; } - if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != handle->route) { - nv_ioctl(object, "object route != owner\n"); + if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != object->route) { + nvif_ioctl(&client->object, "route != owner\n"); return -EACCES; } - *route = handle->route; - *token = handle->token; + *route = object->route; + *token = object->token; if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) { if (nvkm_ioctl_v0[type].version == 0) - ret = nvkm_ioctl_v0[type].func(handle, data, size); + ret = nvkm_ioctl_v0[type].func(object, data, size); } return ret; @@ -497,25 +417,26 @@ int nvkm_ioctl(struct nvkm_client *client, bool supervisor, void *data, u32 size, void **hack) { + struct nvkm_object *object = &client->object; union { struct nvif_ioctl_v0 v0; } *args = data; int ret; client->super = supervisor; - nv_ioctl(client, "size %d\n", size); + nvif_ioctl(object, "size %d\n", size); if (nvif_unpack(args->v0, 0, 0, true)) { - nv_ioctl(client, "vers %d type %02x path %d owner %02x\n", - args->v0.version, args->v0.type, args->v0.path_nr, - args->v0.owner); - ret = nvkm_ioctl_path(client->root, args->v0.type, - args->v0.path_nr, args->v0.path, + nvif_ioctl(object, + "vers %d type %02x object %016llx owner %02x\n", + args->v0.version, args->v0.type, args->v0.object, + args->v0.owner); + ret = nvkm_ioctl_path(client, args->v0.object, args->v0.type, data, size, args->v0.owner, &args->v0.route, &args->v0.token); } - nv_ioctl(client, "return %d\n", ret); + nvif_ioctl(object, "return %d\n", ret); if (hack) { *hack = client->data; client->data = NULL; diff --git a/kernel/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/kernel/drivers/gpu/drm/nouveau/nvkm/core/memory.c new file mode 100644 index 000000000..8903c04c9 --- /dev/null +++ b/kernel/drivers/gpu/drm/nouveau/nvkm/core/memory.c @@ -0,0 +1,64 @@ +/* + * Copyright 2015 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ +#include +#include + +void +nvkm_memory_ctor(const struct nvkm_memory_func *func, + struct nvkm_memory *memory) +{ + memory->func = func; +} + +void +nvkm_memory_del(struct nvkm_memory **pmemory) +{ + struct nvkm_memory *memory = *pmemory; + if (memory && !WARN_ON(!memory->func)) { + if (memory->func->dtor) + *pmemory = memory->func->dtor(memory); + kfree(*pmemory); + *pmemory = NULL; + } +} + +int +nvkm_memory_new(struct nvkm_device *device, enum nvkm_memory_target target, + u64 size, u32 align, bool zero, + struct nvkm_memory **pmemory) +{ + struct nvkm_instmem *imem = device->imem; + struct nvkm_memory *memory; + int ret = -ENOSYS; + + if (unlikely(target != NVKM_MEM_TARGET_INST || !imem)) + return -ENOSYS; + + ret = nvkm_instobj_new(imem, size, align, zero, &memory); + if (ret) + return ret; + + *pmemory = memory; + return 0; +} diff --git a/kernel/drivers/gpu/drm/nouveau/nvkm/core/mm.c b/kernel/drivers/gpu/drm/nouveau/nvkm/core/mm.c index 7f458dfd5..09a1eee8f 100644 --- a/kernel/drivers/gpu/drm/nouveau/nvkm/core/mm.c +++ b/kernel/drivers/gpu/drm/nouveau/nvkm/core/mm.c @@ -26,7 +26,7 @@ #define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \ list_entry((root)->nl_entry.dir, struct nvkm_mm_node, nl_entry) -static void +void nvkm_mm_dump(struct nvkm_mm *mm, const char *header) { struct nvkm_mm_node *node; diff --git a/kernel/drivers/gpu/drm/nouveau/nvkm/core/namedb.c b/kernel/drivers/gpu/drm/nouveau/nvkm/core/namedb.c deleted file mode 100644 index 6400767c5..000000000 --- a/kernel/drivers/gpu/drm/nouveau/nvkm/core/namedb.c +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
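The new nvkm_memory_del() above follows the nvkm convention that a destructor may return a different pointer for the caller to kfree() (for example the enclosing allocation). Below is a minimal user-space sketch of that convention; the demo_* types are stand-ins invented for illustration, not nvkm structures.

#include <stdlib.h>

struct demo_memory;

struct demo_memory_func {
    /* may return a different allocation for the caller to free() */
    void *(*dtor)(struct demo_memory *);
};

struct demo_memory {
    const struct demo_memory_func *func;
};

struct demo_wrapper {
    struct demo_memory base;    /* kept first: &base == enclosing block */
    int extra;
};

static void *
demo_wrapper_dtor(struct demo_memory *memory)
{
    /* hand back the enclosing allocation, as nvkm dtors are allowed to */
    return (struct demo_wrapper *)memory;
}

static const struct demo_memory_func demo_wrapper_func = {
    .dtor = demo_wrapper_dtor,
};

static void
demo_memory_del(struct demo_memory **pmemory)
{
    struct demo_memory *memory = *pmemory;

    if (memory && memory->func) {
        void *alloc = memory;

        if (memory->func->dtor)
            alloc = memory->func->dtor(memory);
        free(alloc);
        *pmemory = NULL;
    }
}

int
main(void)
{
    struct demo_wrapper *wrap = calloc(1, sizeof(*wrap));
    struct demo_memory *memory;

    if (!wrap)
        return 1;
    wrap->base.func = &demo_wrapper_func;
    memory = &wrap->base;
    demo_memory_del(&memory);
    return memory != NULL;      /* 0: pointer was freed and cleared */
}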
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ -#include -#include -#include - -static struct nvkm_handle * -nvkm_namedb_lookup(struct nvkm_namedb *namedb, u32 name) -{ - struct nvkm_handle *handle; - - list_for_each_entry(handle, &namedb->list, node) { - if (handle->name == name) - return handle; - } - - return NULL; -} - -static struct nvkm_handle * -nvkm_namedb_lookup_class(struct nvkm_namedb *namedb, u16 oclass) -{ - struct nvkm_handle *handle; - - list_for_each_entry(handle, &namedb->list, node) { - if (nv_mclass(handle->object) == oclass) - return handle; - } - - return NULL; -} - -static struct nvkm_handle * -nvkm_namedb_lookup_vinst(struct nvkm_namedb *namedb, u64 vinst) -{ - struct nvkm_handle *handle; - - list_for_each_entry(handle, &namedb->list, node) { - if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) { - if (nv_gpuobj(handle->object)->addr == vinst) - return handle; - } - } - - return NULL; -} - -static struct nvkm_handle * -nvkm_namedb_lookup_cinst(struct nvkm_namedb *namedb, u32 cinst) -{ - struct nvkm_handle *handle; - - list_for_each_entry(handle, &namedb->list, node) { - if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) { - if (nv_gpuobj(handle->object)->node && - nv_gpuobj(handle->object)->node->offset == cinst) - return handle; - } - } - - return NULL; -} - -int -nvkm_namedb_insert(struct nvkm_namedb *namedb, u32 name, - struct nvkm_object *object, - struct nvkm_handle *handle) -{ - int ret = -EEXIST; - write_lock_irq(&namedb->lock); - if (!nvkm_namedb_lookup(namedb, name)) { - nvkm_object_ref(object, &handle->object); - handle->namedb = namedb; - list_add(&handle->node, &namedb->list); - ret = 0; - } - write_unlock_irq(&namedb->lock); - return ret; -} - -void -nvkm_namedb_remove(struct nvkm_handle *handle) -{ - struct nvkm_namedb *namedb = handle->namedb; - struct nvkm_object *object = handle->object; - write_lock_irq(&namedb->lock); - list_del(&handle->node); - write_unlock_irq(&namedb->lock); - nvkm_object_ref(NULL, &object); -} - -struct nvkm_handle * -nvkm_namedb_get(struct nvkm_namedb *namedb, u32 name) -{ - struct nvkm_handle *handle; - read_lock(&namedb->lock); - handle = nvkm_namedb_lookup(namedb, name); - if (handle == NULL) - read_unlock(&namedb->lock); - return handle; -} - -struct nvkm_handle * -nvkm_namedb_get_class(struct nvkm_namedb *namedb, u16 oclass) -{ - struct nvkm_handle *handle; - read_lock(&namedb->lock); - handle = nvkm_namedb_lookup_class(namedb, oclass); - if (handle == NULL) - read_unlock(&namedb->lock); - return handle; -} - -struct nvkm_handle * -nvkm_namedb_get_vinst(struct nvkm_namedb *namedb, u64 vinst) -{ - struct nvkm_handle *handle; - read_lock(&namedb->lock); - handle = nvkm_namedb_lookup_vinst(namedb, vinst); - if (handle == NULL) - read_unlock(&namedb->lock); - return handle; -} - -struct nvkm_handle * -nvkm_namedb_get_cinst(struct nvkm_namedb *namedb, u32 cinst) -{ - struct nvkm_handle *handle; - read_lock(&namedb->lock); - handle = nvkm_namedb_lookup_cinst(namedb, cinst); - if (handle == NULL) - read_unlock(&namedb->lock); - return handle; -} - -void 
-nvkm_namedb_put(struct nvkm_handle *handle) -{ - if (handle) - read_unlock(&handle->namedb->lock); -} - -int -nvkm_namedb_create_(struct nvkm_object *parent, struct nvkm_object *engine, - struct nvkm_oclass *oclass, u32 pclass, - struct nvkm_oclass *sclass, u64 engcls, - int length, void **pobject) -{ - struct nvkm_namedb *namedb; - int ret; - - ret = nvkm_parent_create_(parent, engine, oclass, pclass | - NV_NAMEDB_CLASS, sclass, engcls, - length, pobject); - namedb = *pobject; - if (ret) - return ret; - - rwlock_init(&namedb->lock); - INIT_LIST_HEAD(&namedb->list); - return 0; -} - -int -_nvkm_namedb_ctor(struct nvkm_object *parent, struct nvkm_object *engine, - struct nvkm_oclass *oclass, void *data, u32 size, - struct nvkm_object **pobject) -{ - struct nvkm_namedb *object; - int ret; - - ret = nvkm_namedb_create(parent, engine, oclass, 0, NULL, 0, &object); - *pobject = nv_object(object); - if (ret) - return ret; - - return 0; -} diff --git a/kernel/drivers/gpu/drm/nouveau/nvkm/core/object.c b/kernel/drivers/gpu/drm/nouveau/nvkm/core/object.c index 979f3627d..67aa7223d 100644 --- a/kernel/drivers/gpu/drm/nouveau/nvkm/core/object.c +++ b/kernel/drivers/gpu/drm/nouveau/nvkm/core/object.c @@ -22,309 +22,243 @@ * Authors: Ben Skeggs */ #include +#include #include -#ifdef NVKM_OBJECT_MAGIC -static struct list_head _objlist = LIST_HEAD_INIT(_objlist); -static DEFINE_SPINLOCK(_objlist_lock); -#endif - int -nvkm_object_create_(struct nvkm_object *parent, struct nvkm_object *engine, - struct nvkm_oclass *oclass, u32 pclass, - int size, void **pobject) +nvkm_object_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size) { - struct nvkm_object *object; - - object = *pobject = kzalloc(size, GFP_KERNEL); - if (!object) - return -ENOMEM; - - nvkm_object_ref(parent, &object->parent); - nvkm_object_ref(engine, (struct nvkm_object **)&object->engine); - object->oclass = oclass; - object->oclass->handle |= pclass; - atomic_set(&object->refcount, 1); - atomic_set(&object->usecount, 0); - -#ifdef NVKM_OBJECT_MAGIC - object->_magic = NVKM_OBJECT_MAGIC; - spin_lock(&_objlist_lock); - list_add(&object->list, &_objlist); - spin_unlock(&_objlist_lock); -#endif - return 0; + if (likely(object->func->mthd)) + return object->func->mthd(object, mthd, data, size); + return -ENODEV; } int -_nvkm_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine, - struct nvkm_oclass *oclass, void *data, u32 size, - struct nvkm_object **pobject) +nvkm_object_ntfy(struct nvkm_object *object, u32 mthd, + struct nvkm_event **pevent) { - if (size != 0) - return -ENOSYS; - return nvkm_object_create(parent, engine, oclass, 0, pobject); + if (likely(object->func->ntfy)) + return object->func->ntfy(object, mthd, pevent); + return -ENODEV; } -void -nvkm_object_destroy(struct nvkm_object *object) +int +nvkm_object_map(struct nvkm_object *object, u64 *addr, u32 *size) { -#ifdef NVKM_OBJECT_MAGIC - spin_lock(&_objlist_lock); - list_del(&object->list); - spin_unlock(&_objlist_lock); -#endif - nvkm_object_ref(NULL, (struct nvkm_object **)&object->engine); - nvkm_object_ref(NULL, &object->parent); - kfree(object); + if (likely(object->func->map)) + return object->func->map(object, addr, size); + return -ENODEV; } int -nvkm_object_init(struct nvkm_object *object) +nvkm_object_rd08(struct nvkm_object *object, u64 addr, u8 *data) { - return 0; + if (likely(object->func->rd08)) + return object->func->rd08(object, addr, data); + return -ENODEV; } int -nvkm_object_fini(struct nvkm_object *object, bool suspend) 
+nvkm_object_rd16(struct nvkm_object *object, u64 addr, u16 *data) { - return 0; + if (likely(object->func->rd16)) + return object->func->rd16(object, addr, data); + return -ENODEV; } -struct nvkm_ofuncs -nvkm_object_ofuncs = { - .ctor = _nvkm_object_ctor, - .dtor = nvkm_object_destroy, - .init = nvkm_object_init, - .fini = nvkm_object_fini, -}; - int -nvkm_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine, - struct nvkm_oclass *oclass, void *data, u32 size, - struct nvkm_object **pobject) +nvkm_object_rd32(struct nvkm_object *object, u64 addr, u32 *data) { - struct nvkm_ofuncs *ofuncs = oclass->ofuncs; - struct nvkm_object *object = NULL; - int ret; - - ret = ofuncs->ctor(parent, engine, oclass, data, size, &object); - *pobject = object; - if (ret < 0) { - if (ret != -ENODEV) { - nv_error(parent, "failed to create 0x%08x, %d\n", - oclass->handle, ret); - } - - if (object) { - ofuncs->dtor(object); - *pobject = NULL; - } - - return ret; - } - - if (ret == 0) { - nv_trace(object, "created\n"); - atomic_set(&object->refcount, 1); - } - - return 0; + if (likely(object->func->rd32)) + return object->func->rd32(object, addr, data); + return -ENODEV; } -static void -nvkm_object_dtor(struct nvkm_object *object) +int +nvkm_object_wr08(struct nvkm_object *object, u64 addr, u8 data) { - nv_trace(object, "destroying\n"); - nv_ofuncs(object)->dtor(object); + if (likely(object->func->wr08)) + return object->func->wr08(object, addr, data); + return -ENODEV; } -void -nvkm_object_ref(struct nvkm_object *obj, struct nvkm_object **ref) +int +nvkm_object_wr16(struct nvkm_object *object, u64 addr, u16 data) { - if (obj) { - atomic_inc(&obj->refcount); - nv_trace(obj, "inc() == %d\n", atomic_read(&obj->refcount)); - } + if (likely(object->func->wr16)) + return object->func->wr16(object, addr, data); + return -ENODEV; +} - if (*ref) { - int dead = atomic_dec_and_test(&(*ref)->refcount); - nv_trace(*ref, "dec() == %d\n", atomic_read(&(*ref)->refcount)); - if (dead) - nvkm_object_dtor(*ref); - } +int +nvkm_object_wr32(struct nvkm_object *object, u64 addr, u32 data) +{ + if (likely(object->func->wr32)) + return object->func->wr32(object, addr, data); + return -ENODEV; +} - *ref = obj; +int +nvkm_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *gpuobj, + int align, struct nvkm_gpuobj **pgpuobj) +{ + if (object->func->bind) + return object->func->bind(object, gpuobj, align, pgpuobj); + return -ENODEV; } int -nvkm_object_inc(struct nvkm_object *object) +nvkm_object_fini(struct nvkm_object *object, bool suspend) { - int ref = atomic_add_return(1, &object->usecount); + const char *action = suspend ? 
"suspend" : "fini"; + struct nvkm_object *child; + s64 time; int ret; - nv_trace(object, "use(+1) == %d\n", atomic_read(&object->usecount)); - if (ref != 1) - return 0; - - nv_trace(object, "initialising...\n"); - if (object->parent) { - ret = nvkm_object_inc(object->parent); - if (ret) { - nv_error(object, "parent failed, %d\n", ret); - goto fail_parent; - } + nvif_debug(object, "%s children...\n", action); + time = ktime_to_us(ktime_get()); + list_for_each_entry(child, &object->tree, head) { + ret = nvkm_object_fini(child, suspend); + if (ret && suspend) + goto fail_child; } - if (object->engine) { - mutex_lock(&nv_subdev(object->engine)->mutex); - ret = nvkm_object_inc(&object->engine->subdev.object); - mutex_unlock(&nv_subdev(object->engine)->mutex); + nvif_debug(object, "%s running...\n", action); + if (object->func->fini) { + ret = object->func->fini(object, suspend); if (ret) { - nv_error(object, "engine failed, %d\n", ret); - goto fail_engine; + nvif_error(object, "%s failed with %d\n", action, ret); + if (suspend) + goto fail; } } - ret = nv_ofuncs(object)->init(object); - atomic_set(&object->usecount, 1); - if (ret) { - nv_error(object, "init failed, %d\n", ret); - goto fail_self; - } - - nv_trace(object, "initialised\n"); + time = ktime_to_us(ktime_get()) - time; + nvif_debug(object, "%s completed in %lldus\n", action, time); return 0; -fail_self: - if (object->engine) { - mutex_lock(&nv_subdev(object->engine)->mutex); - nvkm_object_dec(&object->engine->subdev.object, false); - mutex_unlock(&nv_subdev(object->engine)->mutex); +fail: + if (object->func->init) { + int rret = object->func->init(object); + if (rret) + nvif_fatal(object, "failed to restart, %d\n", rret); + } +fail_child: + list_for_each_entry_continue_reverse(child, &object->tree, head) { + nvkm_object_init(child); } -fail_engine: - if (object->parent) - nvkm_object_dec(object->parent, false); -fail_parent: - atomic_dec(&object->usecount); return ret; } -static int -nvkm_object_decf(struct nvkm_object *object) +int +nvkm_object_init(struct nvkm_object *object) { + struct nvkm_object *child; + s64 time; int ret; - nv_trace(object, "stopping...\n"); - - ret = nv_ofuncs(object)->fini(object, false); - atomic_set(&object->usecount, 0); - if (ret) - nv_warn(object, "failed fini, %d\n", ret); - - if (object->engine) { - mutex_lock(&nv_subdev(object->engine)->mutex); - nvkm_object_dec(&object->engine->subdev.object, false); - mutex_unlock(&nv_subdev(object->engine)->mutex); + nvif_debug(object, "init running...\n"); + time = ktime_to_us(ktime_get()); + if (object->func->init) { + ret = object->func->init(object); + if (ret) + goto fail; } - if (object->parent) - nvkm_object_dec(object->parent, false); + nvif_debug(object, "init children...\n"); + list_for_each_entry(child, &object->tree, head) { + ret = nvkm_object_init(child); + if (ret) + goto fail_child; + } - nv_trace(object, "stopped\n"); + time = ktime_to_us(ktime_get()) - time; + nvif_debug(object, "init completed in %lldus\n", time); return 0; + +fail_child: + list_for_each_entry_continue_reverse(child, &object->tree, head) + nvkm_object_fini(child, false); +fail: + nvif_error(object, "init failed with %d\n", ret); + if (object->func->fini) + object->func->fini(object, false); + return ret; } -static int -nvkm_object_decs(struct nvkm_object *object) +void * +nvkm_object_dtor(struct nvkm_object *object) { - int ret, rret; - - nv_trace(object, "suspending...\n"); - - ret = nv_ofuncs(object)->fini(object, true); - atomic_set(&object->usecount, 0); - if (ret) { - 
nv_error(object, "failed suspend, %d\n", ret); - return ret; + struct nvkm_object *child, *ctemp; + void *data = object; + s64 time; + + nvif_debug(object, "destroy children...\n"); + time = ktime_to_us(ktime_get()); + list_for_each_entry_safe(child, ctemp, &object->tree, head) { + nvkm_object_del(&child); } - if (object->engine) { - mutex_lock(&nv_subdev(object->engine)->mutex); - ret = nvkm_object_dec(&object->engine->subdev.object, true); - mutex_unlock(&nv_subdev(object->engine)->mutex); - if (ret) { - nv_warn(object, "engine failed suspend, %d\n", ret); - goto fail_engine; - } - } - - if (object->parent) { - ret = nvkm_object_dec(object->parent, true); - if (ret) { - nv_warn(object, "parent failed suspend, %d\n", ret); - goto fail_parent; - } - } - - nv_trace(object, "suspended\n"); - return 0; + nvif_debug(object, "destroy running...\n"); + if (object->func->dtor) + data = object->func->dtor(object); + nvkm_engine_unref(&object->engine); + time = ktime_to_us(ktime_get()) - time; + nvif_debug(object, "destroy completed in %lldus...\n", time); + return data; +} -fail_parent: - if (object->engine) { - mutex_lock(&nv_subdev(object->engine)->mutex); - rret = nvkm_object_inc(&object->engine->subdev.object); - mutex_unlock(&nv_subdev(object->engine)->mutex); - if (rret) - nv_fatal(object, "engine failed to reinit, %d\n", rret); +void +nvkm_object_del(struct nvkm_object **pobject) +{ + struct nvkm_object *object = *pobject; + if (object && !WARN_ON(!object->func)) { + *pobject = nvkm_object_dtor(object); + nvkm_client_remove(object->client, object); + list_del(&object->head); + kfree(*pobject); + *pobject = NULL; } +} -fail_engine: - rret = nv_ofuncs(object)->init(object); - if (rret) - nv_fatal(object, "failed to reinit, %d\n", rret); - - return ret; +void +nvkm_object_ctor(const struct nvkm_object_func *func, + const struct nvkm_oclass *oclass, struct nvkm_object *object) +{ + object->func = func; + object->client = oclass->client; + object->engine = nvkm_engine_ref(oclass->engine); + object->oclass = oclass->base.oclass; + object->handle = oclass->handle; + INIT_LIST_HEAD(&object->head); + INIT_LIST_HEAD(&object->tree); + RB_CLEAR_NODE(&object->node); + WARN_ON(oclass->engine && !object->engine); } int -nvkm_object_dec(struct nvkm_object *object, bool suspend) +nvkm_object_new_(const struct nvkm_object_func *func, + const struct nvkm_oclass *oclass, void *data, u32 size, + struct nvkm_object **pobject) { - int ref = atomic_add_return(-1, &object->usecount); - int ret; - - nv_trace(object, "use(-1) == %d\n", atomic_read(&object->usecount)); - - if (ref == 0) { - if (suspend) - ret = nvkm_object_decs(object); - else - ret = nvkm_object_decf(object); - - if (ret) { - atomic_inc(&object->usecount); - return ret; - } + if (size == 0) { + if (!(*pobject = kzalloc(sizeof(**pobject), GFP_KERNEL))) + return -ENOMEM; + nvkm_object_ctor(func, oclass, *pobject); + return 0; } - - return 0; + return -ENOSYS; } -void -nvkm_object_debug(void) +static const struct nvkm_object_func +nvkm_object_func = { +}; + +int +nvkm_object_new(const struct nvkm_oclass *oclass, void *data, u32 size, + struct nvkm_object **pobject) { -#ifdef NVKM_OBJECT_MAGIC - struct nvkm_object *object; - if (!list_empty(&_objlist)) { - nv_fatal(NULL, "*******************************************\n"); - nv_fatal(NULL, "* AIIIII! 
object(s) still exist!!!\n"); - nv_fatal(NULL, "*******************************************\n"); - list_for_each_entry(object, &_objlist, list) { - nv_fatal(object, "%p/%p/%d/%d\n", - object->parent, object->engine, - atomic_read(&object->refcount), - atomic_read(&object->usecount)); - } - } -#endif + const struct nvkm_object_func *func = + oclass->base.func ? oclass->base.func : &nvkm_object_func; + return nvkm_object_new_(func, oclass, data, size, pobject); } diff --git a/kernel/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c b/kernel/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c new file mode 100644 index 000000000..e31a0479a --- /dev/null +++ b/kernel/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c @@ -0,0 +1,200 @@ +/* + * Copyright 2015 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
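The rewritten nvkm_object_init()/nvkm_object_fini() above walk the object's child list and, on failure, unwind the children that already changed state in reverse order. The sketch below shows the same rollback pattern in isolation, using an array instead of a linked list; the demo names and the simulated failure are illustrative only.

#include <stdio.h>

#define NCHILD 3

static int
child_init(int i)
{
    printf("init child %d\n", i);
    return i == 2 ? -1 : 0;     /* pretend the last child fails */
}

static void
child_fini(int i)
{
    printf("fini child %d\n", i);
}

static int
parent_init(void)
{
    int i, ret;

    for (i = 0; i < NCHILD; i++) {
        ret = child_init(i);
        if (ret)
            goto fail_child;
    }
    return 0;

fail_child:
    /* unwind only the children that did initialise, newest first */
    while (--i >= 0)
        child_fini(i);
    return ret;
}

int
main(void)
{
    return parent_init() ? 1 : 0;
}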
+ * + * Authors: Ben Skeggs + */ +#include + +static int +nvkm_oproxy_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size) +{ + return nvkm_object_mthd(nvkm_oproxy(object)->object, mthd, data, size); +} + +static int +nvkm_oproxy_ntfy(struct nvkm_object *object, u32 mthd, + struct nvkm_event **pevent) +{ + return nvkm_object_ntfy(nvkm_oproxy(object)->object, mthd, pevent); +} + +static int +nvkm_oproxy_map(struct nvkm_object *object, u64 *addr, u32 *size) +{ + return nvkm_object_map(nvkm_oproxy(object)->object, addr, size); +} + +static int +nvkm_oproxy_rd08(struct nvkm_object *object, u64 addr, u8 *data) +{ + return nvkm_object_rd08(nvkm_oproxy(object)->object, addr, data); +} + +static int +nvkm_oproxy_rd16(struct nvkm_object *object, u64 addr, u16 *data) +{ + return nvkm_object_rd16(nvkm_oproxy(object)->object, addr, data); +} + +static int +nvkm_oproxy_rd32(struct nvkm_object *object, u64 addr, u32 *data) +{ + return nvkm_object_rd32(nvkm_oproxy(object)->object, addr, data); +} + +static int +nvkm_oproxy_wr08(struct nvkm_object *object, u64 addr, u8 data) +{ + return nvkm_object_wr08(nvkm_oproxy(object)->object, addr, data); +} + +static int +nvkm_oproxy_wr16(struct nvkm_object *object, u64 addr, u16 data) +{ + return nvkm_object_wr16(nvkm_oproxy(object)->object, addr, data); +} + +static int +nvkm_oproxy_wr32(struct nvkm_object *object, u64 addr, u32 data) +{ + return nvkm_object_wr32(nvkm_oproxy(object)->object, addr, data); +} + +static int +nvkm_oproxy_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent, + int align, struct nvkm_gpuobj **pgpuobj) +{ + return nvkm_object_bind(nvkm_oproxy(object)->object, + parent, align, pgpuobj); +} + +static int +nvkm_oproxy_sclass(struct nvkm_object *object, int index, + struct nvkm_oclass *oclass) +{ + struct nvkm_oproxy *oproxy = nvkm_oproxy(object); + oclass->parent = oproxy->object; + if (!oproxy->object->func->sclass) + return -ENODEV; + return oproxy->object->func->sclass(oproxy->object, index, oclass); +} + +static int +nvkm_oproxy_fini(struct nvkm_object *object, bool suspend) +{ + struct nvkm_oproxy *oproxy = nvkm_oproxy(object); + int ret; + + if (oproxy->func->fini[0]) { + ret = oproxy->func->fini[0](oproxy, suspend); + if (ret && suspend) + return ret; + } + + if (oproxy->object->func->fini) { + ret = oproxy->object->func->fini(oproxy->object, suspend); + if (ret && suspend) + return ret; + } + + if (oproxy->func->fini[1]) { + ret = oproxy->func->fini[1](oproxy, suspend); + if (ret && suspend) + return ret; + } + + return 0; +} + +static int +nvkm_oproxy_init(struct nvkm_object *object) +{ + struct nvkm_oproxy *oproxy = nvkm_oproxy(object); + int ret; + + if (oproxy->func->init[0]) { + ret = oproxy->func->init[0](oproxy); + if (ret) + return ret; + } + + if (oproxy->object->func->init) { + ret = oproxy->object->func->init(oproxy->object); + if (ret) + return ret; + } + + if (oproxy->func->init[1]) { + ret = oproxy->func->init[1](oproxy); + if (ret) + return ret; + } + + return 0; +} + +static void * +nvkm_oproxy_dtor(struct nvkm_object *object) +{ + struct nvkm_oproxy *oproxy = nvkm_oproxy(object); + if (oproxy->func->dtor[0]) + oproxy->func->dtor[0](oproxy); + nvkm_object_del(&oproxy->object); + if (oproxy->func->dtor[1]) + oproxy->func->dtor[1](oproxy); + return oproxy; +} + +static const struct nvkm_object_func +nvkm_oproxy_func = { + .dtor = nvkm_oproxy_dtor, + .init = nvkm_oproxy_init, + .fini = nvkm_oproxy_fini, + .mthd = nvkm_oproxy_mthd, + .ntfy = nvkm_oproxy_ntfy, + .map = nvkm_oproxy_map, + .rd08 = 
nvkm_oproxy_rd08, + .rd16 = nvkm_oproxy_rd16, + .rd32 = nvkm_oproxy_rd32, + .wr08 = nvkm_oproxy_wr08, + .wr16 = nvkm_oproxy_wr16, + .wr32 = nvkm_oproxy_wr32, + .bind = nvkm_oproxy_bind, + .sclass = nvkm_oproxy_sclass, +}; + +void +nvkm_oproxy_ctor(const struct nvkm_oproxy_func *func, + const struct nvkm_oclass *oclass, struct nvkm_oproxy *oproxy) +{ + nvkm_object_ctor(&nvkm_oproxy_func, oclass, &oproxy->base); + oproxy->func = func; +} + +int +nvkm_oproxy_new_(const struct nvkm_oproxy_func *func, + const struct nvkm_oclass *oclass, struct nvkm_oproxy **poproxy) +{ + if (!(*poproxy = kzalloc(sizeof(**poproxy), GFP_KERNEL))) + return -ENOMEM; + nvkm_oproxy_ctor(func, oclass, *poproxy); + return 0; +} diff --git a/kernel/drivers/gpu/drm/nouveau/nvkm/core/option.c b/kernel/drivers/gpu/drm/nouveau/nvkm/core/option.c index 19d153f8c..3e62cf8cd 100644 --- a/kernel/drivers/gpu/drm/nouveau/nvkm/core/option.c +++ b/kernel/drivers/gpu/drm/nouveau/nvkm/core/option.c @@ -73,6 +73,24 @@ nvkm_boolopt(const char *optstr, const char *opt, bool value) return value; } +long +nvkm_longopt(const char *optstr, const char *opt, long value) +{ + long result = value; + int arglen; + char *s; + + optstr = nvkm_stropt(optstr, opt, &arglen); + if (optstr && (s = kstrndup(optstr, arglen, GFP_KERNEL))) { + int ret = kstrtol(s, 0, &value); + if (ret == 0) + result = value; + kfree(s); + } + + return result; +} + int nvkm_dbgopt(const char *optstr, const char *sub) { @@ -95,7 +113,7 @@ nvkm_dbgopt(const char *optstr, const char *sub) else if (!strncasecmpz(optstr, "warn", len)) level = NV_DBG_WARN; else if (!strncasecmpz(optstr, "info", len)) - level = NV_DBG_INFO_NORMAL; + level = NV_DBG_INFO; else if (!strncasecmpz(optstr, "debug", len)) level = NV_DBG_DEBUG; else if (!strncasecmpz(optstr, "trace", len)) diff --git a/kernel/drivers/gpu/drm/nouveau/nvkm/core/parent.c b/kernel/drivers/gpu/drm/nouveau/nvkm/core/parent.c deleted file mode 100644 index dd56cd1ee..000000000 --- a/kernel/drivers/gpu/drm/nouveau/nvkm/core/parent.c +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
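nvkm_longopt(), added to option.c above, parses a "name=value" pair out of the option string and falls back to a caller-supplied default on any parse failure. The user-space approximation below mirrors that behaviour; it substitutes strstr() for nvkm_stropt() (so, unlike the real helper, it can also match inside a longer option name) and uses invented demo_* names.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static long
demo_longopt(const char *optstr, const char *opt, long value)
{
    size_t len = strlen(opt);
    const char *p = optstr ? strstr(optstr, opt) : NULL;

    if (p && p[len] == '=') {
        char *end;
        long parsed = strtol(p + len + 1, &end, 0);

        if (end != p + len + 1)     /* keep the default on parse failure */
            value = parsed;
    }
    return value;
}

int
main(void)
{
    const char *cfg = "NvBoost=1,NvMSI=0,NvPmEnableGating=2";

    printf("NvPmEnableGating = %ld\n",
           demo_longopt(cfg, "NvPmEnableGating", 0));
    printf("Missing          = %ld\n",
           demo_longopt(cfg, "Missing", -1));
    return 0;
}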
- * - * Authors: Ben Skeggs - */ -#include -#include -#include - -int -nvkm_parent_sclass(struct nvkm_object *parent, u16 handle, - struct nvkm_object **pengine, - struct nvkm_oclass **poclass) -{ - struct nvkm_sclass *sclass; - struct nvkm_engine *engine; - struct nvkm_oclass *oclass; - u64 mask; - - sclass = nv_parent(parent)->sclass; - while (sclass) { - if ((sclass->oclass->handle & 0xffff) == handle) { - *pengine = &parent->engine->subdev.object; - *poclass = sclass->oclass; - return 0; - } - - sclass = sclass->sclass; - } - - mask = nv_parent(parent)->engine; - while (mask) { - int i = __ffs64(mask); - - if (nv_iclass(parent, NV_CLIENT_CLASS)) - engine = nv_engine(nv_client(parent)->device); - else - engine = nvkm_engine(parent, i); - - if (engine) { - oclass = engine->sclass; - while (oclass->ofuncs) { - if ((oclass->handle & 0xffff) == handle) { - *pengine = nv_object(engine); - *poclass = oclass; - return 0; - } - oclass++; - } - } - - mask &= ~(1ULL << i); - } - - return -EINVAL; -} - -int -nvkm_parent_lclass(struct nvkm_object *parent, u32 *lclass, int size) -{ - struct nvkm_sclass *sclass; - struct nvkm_engine *engine; - struct nvkm_oclass *oclass; - int nr = -1, i; - u64 mask; - - sclass = nv_parent(parent)->sclass; - while (sclass) { - if (++nr < size) - lclass[nr] = sclass->oclass->handle & 0xffff; - sclass = sclass->sclass; - } - - mask = nv_parent(parent)->engine; - while (i = __ffs64(mask), mask) { - engine = nvkm_engine(parent, i); - if (engine && (oclass = engine->sclass)) { - while (oclass->ofuncs) { - if (++nr < size) - lclass[nr] = oclass->handle & 0xffff; - oclass++; - } - } - - mask &= ~(1ULL << i); - } - - return nr + 1; -} - -int -nvkm_parent_create_(struct nvkm_object *parent, struct nvkm_object *engine, - struct nvkm_oclass *oclass, u32 pclass, - struct nvkm_oclass *sclass, u64 engcls, - int size, void **pobject) -{ - struct nvkm_parent *object; - struct nvkm_sclass *nclass; - int ret; - - ret = nvkm_object_create_(parent, engine, oclass, pclass | - NV_PARENT_CLASS, size, pobject); - object = *pobject; - if (ret) - return ret; - - while (sclass && sclass->ofuncs) { - nclass = kzalloc(sizeof(*nclass), GFP_KERNEL); - if (!nclass) - return -ENOMEM; - - nclass->sclass = object->sclass; - object->sclass = nclass; - nclass->engine = engine ? nv_engine(engine) : NULL; - nclass->oclass = sclass; - sclass++; - } - - object->engine = engcls; - return 0; -} - -void -nvkm_parent_destroy(struct nvkm_parent *parent) -{ - struct nvkm_sclass *sclass; - - while ((sclass = parent->sclass)) { - parent->sclass = sclass->sclass; - kfree(sclass); - } - - nvkm_object_destroy(&parent->object); -} - - -void -_nvkm_parent_dtor(struct nvkm_object *object) -{ - nvkm_parent_destroy(nv_parent(object)); -} diff --git a/kernel/drivers/gpu/drm/nouveau/nvkm/core/printk.c b/kernel/drivers/gpu/drm/nouveau/nvkm/core/printk.c deleted file mode 100644 index 4a220eb91..000000000 --- a/kernel/drivers/gpu/drm/nouveau/nvkm/core/printk.c +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ -#include -#include -#include - -int nv_info_debug_level = NV_DBG_INFO_NORMAL; - -void -nv_printk_(struct nvkm_object *object, int level, const char *fmt, ...) -{ - static const char name[] = { '!', 'E', 'W', ' ', 'D', 'T', 'P', 'S' }; - const char *pfx; - char mfmt[256]; - va_list args; - - switch (level) { - case NV_DBG_FATAL: - pfx = KERN_CRIT; - break; - case NV_DBG_ERROR: - pfx = KERN_ERR; - break; - case NV_DBG_WARN: - pfx = KERN_WARNING; - break; - case NV_DBG_INFO_NORMAL: - pfx = KERN_INFO; - break; - case NV_DBG_DEBUG: - case NV_DBG_PARANOIA: - case NV_DBG_TRACE: - case NV_DBG_SPAM: - default: - pfx = KERN_DEBUG; - break; - } - - if (object && !nv_iclass(object, NV_CLIENT_CLASS)) { - struct nvkm_object *device; - struct nvkm_object *subdev; - char obuf[64], *ofmt = ""; - - if (object->engine == NULL) { - subdev = object; - while (subdev && !nv_iclass(subdev, NV_SUBDEV_CLASS)) - subdev = subdev->parent; - } else { - subdev = &object->engine->subdev.object; - } - - device = subdev; - if (device->parent) - device = device->parent; - - if (object != subdev) { - snprintf(obuf, sizeof(obuf), "[0x%08x]", - nv_hclass(object)); - ofmt = obuf; - } - - if (level > nv_subdev(subdev)->debug) - return; - - snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s][%s]%s %s", pfx, - name[level], nv_subdev(subdev)->name, - nv_device(device)->name, ofmt, fmt); - } else - if (object && nv_iclass(object, NV_CLIENT_CLASS)) { - if (level > nv_client(object)->debug) - return; - - snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s] %s", pfx, - name[level], nv_client(object)->name, fmt); - } else { - snprintf(mfmt, sizeof(mfmt), "%snouveau: %s", pfx, fmt); - } - - va_start(args, fmt); - vprintk(mfmt, args); - va_end(args); -} diff --git a/kernel/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/kernel/drivers/gpu/drm/nouveau/nvkm/core/ramht.c index ebd4d1547..3216e157a 100644 --- a/kernel/drivers/gpu/drm/nouveau/nvkm/core/ramht.c +++ b/kernel/drivers/gpu/drm/nouveau/nvkm/core/ramht.c @@ -22,8 +22,6 @@ #include #include -#include - static u32 nvkm_ramht_hash(struct nvkm_ramht *ramht, int chid, u32 handle) { @@ -35,72 +33,130 @@ nvkm_ramht_hash(struct nvkm_ramht *ramht, int chid, u32 handle) } hash ^= chid << (ramht->bits - 4); - hash = hash << 3; return hash; } -int -nvkm_ramht_insert(struct nvkm_ramht *ramht, int chid, u32 handle, u32 context) +struct nvkm_gpuobj * +nvkm_ramht_search(struct nvkm_ramht *ramht, int 
chid, u32 handle) { - struct nvkm_bar *bar = nvkm_bar(ramht); u32 co, ho; co = ho = nvkm_ramht_hash(ramht, chid, handle); do { - if (!nv_ro32(ramht, co + 4)) { - nv_wo32(ramht, co + 0, handle); - nv_wo32(ramht, co + 4, context); - if (bar) - bar->flush(bar); - return co; + if (ramht->data[co].chid == chid) { + if (ramht->data[co].handle == handle) + return ramht->data[co].inst; } - co += 8; - if (co >= nv_gpuobj(ramht)->size) + if (++co >= ramht->size) co = 0; } while (co != ho); - return -ENOMEM; + return NULL; +} + +static int +nvkm_ramht_update(struct nvkm_ramht *ramht, int co, struct nvkm_object *object, + int chid, int addr, u32 handle, u32 context) +{ + struct nvkm_ramht_data *data = &ramht->data[co]; + u64 inst = 0x00000040; /* just non-zero for <=g8x fifo ramht */ + int ret; + + nvkm_gpuobj_del(&data->inst); + data->chid = chid; + data->handle = handle; + + if (object) { + ret = nvkm_object_bind(object, ramht->parent, 16, &data->inst); + if (ret) { + if (ret != -ENODEV) { + data->chid = -1; + return ret; + } + data->inst = NULL; + } + + if (data->inst) { + if (ramht->device->card_type >= NV_50) + inst = data->inst->node->offset; + else + inst = data->inst->addr; + } + + if (addr < 0) context |= inst << -addr; + else context |= inst >> addr; + } + + nvkm_kmap(ramht->gpuobj); + nvkm_wo32(ramht->gpuobj, (co << 3) + 0, handle); + nvkm_wo32(ramht->gpuobj, (co << 3) + 4, context); + nvkm_done(ramht->gpuobj); + return co + 1; } void nvkm_ramht_remove(struct nvkm_ramht *ramht, int cookie) { - struct nvkm_bar *bar = nvkm_bar(ramht); - nv_wo32(ramht, cookie + 0, 0x00000000); - nv_wo32(ramht, cookie + 4, 0x00000000); - if (bar) - bar->flush(bar); + if (--cookie >= 0) + nvkm_ramht_update(ramht, cookie, NULL, -1, 0, 0, 0); +} + +int +nvkm_ramht_insert(struct nvkm_ramht *ramht, struct nvkm_object *object, + int chid, int addr, u32 handle, u32 context) +{ + u32 co, ho; + + if (nvkm_ramht_search(ramht, chid, handle)) + return -EEXIST; + + co = ho = nvkm_ramht_hash(ramht, chid, handle); + do { + if (ramht->data[co].chid < 0) { + return nvkm_ramht_update(ramht, co, object, chid, + addr, handle, context); + } + + if (++co >= ramht->size) + co = 0; + } while (co != ho); + + return -ENOSPC; } -static struct nvkm_oclass -nvkm_ramht_oclass = { - .handle = 0x0000abcd, - .ofuncs = &(struct nvkm_ofuncs) { - .ctor = NULL, - .dtor = _nvkm_gpuobj_dtor, - .init = _nvkm_gpuobj_init, - .fini = _nvkm_gpuobj_fini, - .rd32 = _nvkm_gpuobj_rd32, - .wr32 = _nvkm_gpuobj_wr32, - }, -}; +void +nvkm_ramht_del(struct nvkm_ramht **pramht) +{ + struct nvkm_ramht *ramht = *pramht; + if (ramht) { + nvkm_gpuobj_del(&ramht->gpuobj); + kfree(*pramht); + *pramht = NULL; + } +} int -nvkm_ramht_new(struct nvkm_object *parent, struct nvkm_object *pargpu, - u32 size, u32 align, struct nvkm_ramht **pramht) +nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align, + struct nvkm_gpuobj *parent, struct nvkm_ramht **pramht) { struct nvkm_ramht *ramht; - int ret; + int ret, i; - ret = nvkm_gpuobj_create(parent, parent->engine ? 
- &parent->engine->subdev.object : parent, /* > 3) * + sizeof(*ramht->data), GFP_KERNEL))) + return -ENOMEM; - ramht->bits = order_base_2(nv_gpuobj(ramht)->size >> 3); - return 0; + ramht->device = device; + ramht->parent = parent; + ramht->size = size >> 3; + ramht->bits = order_base_2(ramht->size); + for (i = 0; i < ramht->size; i++) + ramht->data[i].chid = -1; + + ret = nvkm_gpuobj_new(ramht->device, size, align, true, + ramht->parent, &ramht->gpuobj); + if (ret) + nvkm_ramht_del(pramht); + return ret; } diff --git a/kernel/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/kernel/drivers/gpu/drm/nouveau/nvkm/core/subdev.c index c5fb3a793..7de98470a 100644 --- a/kernel/drivers/gpu/drm/nouveau/nvkm/core/subdev.c +++ b/kernel/drivers/gpu/drm/nouveau/nvkm/core/subdev.c @@ -25,96 +25,178 @@ #include #include -struct nvkm_subdev * -nvkm_subdev(void *obj, int idx) -{ - struct nvkm_object *object = nv_object(obj); - while (object && !nv_iclass(object, NV_SUBDEV_CLASS)) - object = object->parent; - if (object == NULL || nv_subidx(nv_subdev(object)) != idx) - object = nv_device(obj)->subdev[idx]; - return object ? nv_subdev(object) : NULL; -} +static struct lock_class_key nvkm_subdev_lock_class[NVKM_SUBDEV_NR]; + +const char * +nvkm_subdev_name[NVKM_SUBDEV_NR] = { + [NVKM_SUBDEV_BAR ] = "bar", + [NVKM_SUBDEV_VBIOS ] = "bios", + [NVKM_SUBDEV_BUS ] = "bus", + [NVKM_SUBDEV_CLK ] = "clk", + [NVKM_SUBDEV_DEVINIT] = "devinit", + [NVKM_SUBDEV_FB ] = "fb", + [NVKM_SUBDEV_FUSE ] = "fuse", + [NVKM_SUBDEV_GPIO ] = "gpio", + [NVKM_SUBDEV_I2C ] = "i2c", + [NVKM_SUBDEV_IBUS ] = "priv", + [NVKM_SUBDEV_INSTMEM] = "imem", + [NVKM_SUBDEV_LTC ] = "ltc", + [NVKM_SUBDEV_MC ] = "mc", + [NVKM_SUBDEV_MMU ] = "mmu", + [NVKM_SUBDEV_MXM ] = "mxm", + [NVKM_SUBDEV_PCI ] = "pci", + [NVKM_SUBDEV_PMU ] = "pmu", + [NVKM_SUBDEV_THERM ] = "therm", + [NVKM_SUBDEV_TIMER ] = "tmr", + [NVKM_SUBDEV_VOLT ] = "volt", + [NVKM_ENGINE_BSP ] = "bsp", + [NVKM_ENGINE_CE0 ] = "ce0", + [NVKM_ENGINE_CE1 ] = "ce1", + [NVKM_ENGINE_CE2 ] = "ce2", + [NVKM_ENGINE_CIPHER ] = "cipher", + [NVKM_ENGINE_DISP ] = "disp", + [NVKM_ENGINE_DMAOBJ ] = "dma", + [NVKM_ENGINE_FIFO ] = "fifo", + [NVKM_ENGINE_GR ] = "gr", + [NVKM_ENGINE_IFB ] = "ifb", + [NVKM_ENGINE_ME ] = "me", + [NVKM_ENGINE_MPEG ] = "mpeg", + [NVKM_ENGINE_MSENC ] = "msenc", + [NVKM_ENGINE_MSPDEC ] = "mspdec", + [NVKM_ENGINE_MSPPP ] = "msppp", + [NVKM_ENGINE_MSVLD ] = "msvld", + [NVKM_ENGINE_PM ] = "pm", + [NVKM_ENGINE_SEC ] = "sec", + [NVKM_ENGINE_SW ] = "sw", + [NVKM_ENGINE_VIC ] = "vic", + [NVKM_ENGINE_VP ] = "vp", +}; void -nvkm_subdev_reset(struct nvkm_object *subdev) +nvkm_subdev_intr(struct nvkm_subdev *subdev) { - nv_trace(subdev, "resetting...\n"); - nv_ofuncs(subdev)->fini(subdev, false); - nv_debug(subdev, "reset\n"); + if (subdev->func->intr) + subdev->func->intr(subdev); } int -nvkm_subdev_init(struct nvkm_subdev *subdev) +nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend) { - int ret = nvkm_object_init(&subdev->object); - if (ret) - return ret; + struct nvkm_device *device = subdev->device; + const char *action = suspend ? 
"suspend" : "fini"; + u32 pmc_enable = subdev->pmc_enable; + s64 time; - nvkm_subdev_reset(&subdev->object); - return 0; -} + nvkm_trace(subdev, "%s running...\n", action); + time = ktime_to_us(ktime_get()); -int -_nvkm_subdev_init(struct nvkm_object *object) -{ - return nvkm_subdev_init(nv_subdev(object)); -} + if (subdev->func->fini) { + int ret = subdev->func->fini(subdev, suspend); + if (ret) { + nvkm_error(subdev, "%s failed, %d\n", action, ret); + if (suspend) + return ret; + } + } -int -nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend) -{ - if (subdev->unit) { - nv_mask(subdev, 0x000200, subdev->unit, 0x00000000); - nv_mask(subdev, 0x000200, subdev->unit, subdev->unit); + if (pmc_enable) { + nvkm_mask(device, 0x000200, pmc_enable, 0x00000000); + nvkm_mask(device, 0x000200, pmc_enable, pmc_enable); + nvkm_rd32(device, 0x000200); } - return nvkm_object_fini(&subdev->object, suspend); + time = ktime_to_us(ktime_get()) - time; + nvkm_trace(subdev, "%s completed in %lldus\n", action, time); + return 0; } int -_nvkm_subdev_fini(struct nvkm_object *object, bool suspend) +nvkm_subdev_preinit(struct nvkm_subdev *subdev) { - return nvkm_subdev_fini(nv_subdev(object), suspend); -} + s64 time; -void -nvkm_subdev_destroy(struct nvkm_subdev *subdev) -{ - int subidx = nv_hclass(subdev) & 0xff; - nv_device(subdev)->subdev[subidx] = NULL; - nvkm_object_destroy(&subdev->object); -} + nvkm_trace(subdev, "preinit running...\n"); + time = ktime_to_us(ktime_get()); -void -_nvkm_subdev_dtor(struct nvkm_object *object) -{ - nvkm_subdev_destroy(nv_subdev(object)); + if (subdev->func->preinit) { + int ret = subdev->func->preinit(subdev); + if (ret) { + nvkm_error(subdev, "preinit failed, %d\n", ret); + return ret; + } + } + + time = ktime_to_us(ktime_get()) - time; + nvkm_trace(subdev, "preinit completed in %lldus\n", time); + return 0; } int -nvkm_subdev_create_(struct nvkm_object *parent, struct nvkm_object *engine, - struct nvkm_oclass *oclass, u32 pclass, - const char *subname, const char *sysname, - int size, void **pobject) +nvkm_subdev_init(struct nvkm_subdev *subdev) { - struct nvkm_subdev *subdev; + s64 time; int ret; - ret = nvkm_object_create_(parent, engine, oclass, pclass | - NV_SUBDEV_CLASS, size, pobject); - subdev = *pobject; - if (ret) - return ret; + nvkm_trace(subdev, "init running...\n"); + time = ktime_to_us(ktime_get()); + + if (subdev->func->oneinit && !subdev->oneinit) { + s64 time; + nvkm_trace(subdev, "one-time init running...\n"); + time = ktime_to_us(ktime_get()); + ret = subdev->func->oneinit(subdev); + if (ret) { + nvkm_error(subdev, "one-time init failed, %d\n", ret); + return ret; + } - __mutex_init(&subdev->mutex, subname, &oclass->lock_class_key); - subdev->name = subname; + subdev->oneinit = true; + time = ktime_to_us(ktime_get()) - time; + nvkm_trace(subdev, "one-time init completed in %lldus\n", time); + } - if (parent) { - struct nvkm_device *device = nv_device(parent); - subdev->debug = nvkm_dbgopt(device->dbgopt, subname); - subdev->mmio = nv_subdev(device)->mmio; + if (subdev->func->init) { + ret = subdev->func->init(subdev); + if (ret) { + nvkm_error(subdev, "init failed, %d\n", ret); + return ret; + } } + time = ktime_to_us(ktime_get()) - time; + nvkm_trace(subdev, "init completed in %lldus\n", time); return 0; } + +void +nvkm_subdev_del(struct nvkm_subdev **psubdev) +{ + struct nvkm_subdev *subdev = *psubdev; + s64 time; + + if (subdev && !WARN_ON(!subdev->func)) { + nvkm_trace(subdev, "destroy running...\n"); + time = ktime_to_us(ktime_get()); + if 
(subdev->func->dtor) + *psubdev = subdev->func->dtor(subdev); + time = ktime_to_us(ktime_get()) - time; + nvkm_trace(subdev, "destroy completed in %lldus\n", time); + kfree(*psubdev); + *psubdev = NULL; + } +} + +void +nvkm_subdev_ctor(const struct nvkm_subdev_func *func, + struct nvkm_device *device, int index, u32 pmc_enable, + struct nvkm_subdev *subdev) +{ + const char *name = nvkm_subdev_name[index]; + subdev->func = func; + subdev->device = device; + subdev->index = index; + subdev->pmc_enable = pmc_enable; + + __mutex_init(&subdev->mutex, name, &nvkm_subdev_lock_class[index]); + subdev->debug = nvkm_dbgopt(device->dbgopt, name); +} -- cgit 1.2.3-korg
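The RAMHT rework in this patch keeps a software shadow of the hash table (one chid/handle/instance record per slot) and probes linearly from the hashed slot until a free entry (chid < 0) or the starting slot is reached, returning -EEXIST/-ENOSPC instead of overwriting. The standalone sketch below reproduces that probing scheme with an invented demo_* table; the hash mix is simplified and the mirroring of entries into the GPU object is omitted.

#include <stdio.h>

#define DEMO_BITS 4
#define DEMO_SIZE (1 << DEMO_BITS)

struct demo_entry {
    int chid;               /* -1 marks a free slot */
    unsigned int handle;
};

static struct demo_entry table[DEMO_SIZE];

static unsigned int
demo_hash(int chid, unsigned int handle)
{
    unsigned int hash = 0;

    /* fold the handle into DEMO_BITS-wide chunks, then mix in the chid */
    while (handle) {
        hash ^= handle & (DEMO_SIZE - 1);
        handle >>= DEMO_BITS;
    }
    hash ^= (unsigned int)chid << (DEMO_BITS - 4);
    return hash & (DEMO_SIZE - 1);
}

static int
demo_insert(int chid, unsigned int handle)
{
    unsigned int co, ho;

    co = ho = demo_hash(chid, handle);
    do {
        if (table[co].chid < 0) {
            table[co].chid = chid;
            table[co].handle = handle;
            return (int)co;
        }
        if (++co >= DEMO_SIZE)
            co = 0;             /* wrap and keep probing */
    } while (co != ho);
    return -1;                  /* table full: -ENOSPC in the kernel code */
}

int
main(void)
{
    int i;

    for (i = 0; i < DEMO_SIZE; i++)
        table[i].chid = -1;
    printf("slot %d\n", demo_insert(0, 0xbeef0001u));
    printf("slot %d\n", demo_insert(0, 0xbeef0002u));
    return 0;
}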