/* * Copyright 2008 Jerome Glisse. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Jerome Glisse */ #include #include #include #include "radeon_reg.h" #include "radeon.h" #include "radeon_trace.h" #define RADEON_CS_MAX_PRIORITY 32u #define RADEON_CS_NUM_BUCKETS (RADEON_CS_MAX_PRIORITY + 1) /* This is based on the bucket sort with O(n) time complexity. * An item with priority "i" is added to bucket[i]. The lists are then * concatenated in descending order. 
*/

/* One list per priority level; items are concatenated highest-first. */
struct radeon_cs_buckets {
	struct list_head bucket[RADEON_CS_NUM_BUCKETS];
};

/* Initialize every priority bucket to an empty list. */
static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
{
	unsigned i;

	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&b->bucket[i]);
}

/* Queue @item into the bucket for @priority (clamped to the maximum). */
static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
				  struct list_head *item, unsigned priority)
{
	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 * list_add_tail() within a bucket keeps the sort stable.
	 */
	list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
}

/* Splice all buckets into @out_list so higher priorities come first
 * (each splice prepends, and buckets are visited in ascending order). */
static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
				       struct list_head *out_list)
{
	unsigned i;

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
		list_splice(&b->bucket[i], out_list);
	}
}

/* Build p->relocs from the userspace relocation chunk: look up each GEM
 * object, choose preferred/allowed domains and a sort priority per BO.
 * NOTE(review): this function is truncated in the visible source; the
 * remainder (bucket sort + validation) is not shown here.
 */
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct drm_device *ddev = p->rdev->ddev;
	struct radeon_cs_chunk *chunk;
	struct radeon_cs_buckets buckets;
	unsigned i;
	bool need_mmap_lock = false;
	int r;

	if (p->chunk_relocs == NULL) {
		return 0;
	}
	chunk = p->chunk_relocs;
	p->dma_reloc_idx = 0;
	/* FIXME: we assume that each relocs use 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs = drm_calloc_large(p->nrelocs, sizeof(struct radeon_bo_list));
	if (p->relocs == NULL) {
		return -ENOMEM;
	}

	radeon_cs_buckets_init(&buckets);

	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;
		struct drm_gem_object *gobj;
		unsigned priority;

		/* FIXME above: each reloc entry is assumed to be 4 dwords */
		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		gobj = drm_gem_object_lookup(ddev, p->filp, r->handle);
		if (gobj == NULL) {
			DRM_ERROR("gem object lookup failed 0x%x\n",
				  r->handle);
			return -ENOENT;
		}
		p->relocs[i].robj = gem_to_radeon_bo(gobj);

		/* The userspace buffer priorities are from 0 to 15. A higher
		 * number means the buffer is more important.
		 * Also, the buffers used for write have a higher priority than
		 * the buffers used for read only, which doubles the range
		 * to 0 to 31. 32 is reserved for the kernel driver.
		 */
		priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
			   + !!r->write_domain;

		/* The first reloc of an UVD job is the msg and that must be
		 * in VRAM; also put everything into VRAM on AGP cards and
		 * older IGP chips to avoid image corruptions. */
		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
		    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev) ||
		     p->rdev->family == CHIP_RS780 ||
		     p->rdev->family == CHIP_RS880)) {

			/* TODO: is this still needed for NI+ ? */
			p->relocs[i].prefered_domains =
				RADEON_GEM_DOMAIN_VRAM;

			p->relocs[i].allowed_domains =
				RADEON_GEM_DOMAIN_VRAM;

			/* prioritize this over any other relocation */
			priority = RADEON_CS_MAX_PRIORITY;
		} else {
			/* Writes win over reads when both domains are set. */
			uint32_t domain = r->write_domain ?
				r->write_domain : r->read_domains;

			if (domain & RADEON_GEM_DOMAIN_CPU) {
				DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
					  "for command submission\n");
				return -EINVAL;
			}

			p->relocs[i].prefered_domains = domain;
			/* Allow falling back to GTT when VRAM is requested. */
			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain |= RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].allowed_domains = domain;
		}

		if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
			uint32_t domain = p->relocs[i].prefered_domains;

			/* userptr BOs must be GTT-mappable; error message is
			 * truncated in the visible source. */
			if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
				DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT i
Query task result data

This API offers an interface to fetch the result data for a given task_id.
A result JSON dict will be returned.
---
tags:
  - Results
parameters:
  -
    in: query
    name: action
    type: string
    default: getResult
    required: true
  -
    in: query
    name: measurement
    type: string
    description: test case name
    required: true
  -
    in: query
    name: task_id
    type: string
    description: the task_id you get before
    required: true
responses:
  200:
    description: a result json dict
    schema:
      id: ResultModel
      properties:
        status:
          type: string
          description: the status of the certain task
          default: success
        result:
          type: array
          items:
            type: object
xt.
 * @pkt:	where to store packet information
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if packet is bigger than remaining ib size. or if packets is unknown.
 **/
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt,
			   unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
	struct radeon_device *rdev = p->rdev;
	uint32_t header;
	int ret = 0, i;

	/* Header dword must lie inside the IB chunk. */
	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case RADEON_PACKET_TYPE0:
		/* Register layout differs between pre-R600 and R600+ parts. */
		if (rdev->family < CHIP_R600) {
			pkt->reg = R100_CP_PACKET0_GET_REG(header);
			pkt->one_reg_wr =
				RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
		} else
			pkt->reg = R600_CP_PACKET0_GET_REG(header);
		break;
	case RADEON_PACKET_TYPE3:
		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
		break;
	case RADEON_PACKET_TYPE2:
		/* count = -1 so that count + 1 below covers only the header
		 * dword — presumably TYPE2 packets carry no payload. */
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		ret = -EINVAL;
		goto dump_ib;
	}
	/* Reject packets whose body would run past the end of the IB. */
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		ret = -EINVAL;
		goto dump_ib;
	}
	return 0;

dump_ib:
	/* On error, dump the whole IB with an arrow at the bad packet. */
	for (i = 0; i < ib_chunk->length_dw; i++) {
		if (i == idx)
			printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
		else
			printk("\t0x%08x\n", radeon_get_ib_value(p, i));
	}
	return ret;
}

/**
 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
 * @p:	structure holding the parser context.
 *
 * Check if the next packet is NOP relocation packet3.
 **/
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	/* Peek at the packet at the current index; radeon_cs_packet_parse()
	 * does not advance p->idx, so this is side-effect free. */
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return false;
	if (p3reloc.type != RADEON_PACKET_TYPE3)
		return false;
	if (p3reloc.opcode != RADEON_PACKET3_NOP)
		return false;
	return true;
}

/**
 * radeon_cs_dump_packet() - dump raw packet context
 * @p:	structure holding the parser context.
 * @pkt:	structure holding the packet.
 *
 * Used mostly for debugging and error reporting.
 **/
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt)
{
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib.ptr;
	idx = pkt->idx;
	/* Print the header dword plus the packet body (count + 1 dwords). */
	for (i = 0; i <= (pkt->count + 1); i++, idx++)
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
}

/**
 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
 * @parser:	parser structure holding parsing context.
 * @data:	pointer to relocation data
 * @offset_start:	starting offset
 * @offset_mask:	offset mask (to align start offset on)
 * @reloc:	reloc informations
 *
 * Check if next packet is relocation packet3, do bo validation and compute
 * GPU offset using the provided start.
**/ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p, struct radeon_bo_list **cs_reloc, int nomm) { struct radeon_cs_chunk *relocs_chunk; struct radeon_cs_packet p3reloc; unsigned idx; int r; if (p->chunk_relocs == NULL) { DRM_ERROR("No relocation chunk !\n"); return -EINVAL; } *cs_reloc = NULL; relocs_chunk = p->chunk_relocs; r = radeon_cs_packet_parse(p, &p3reloc, p->idx); if (r) return r; p->idx += p3reloc.count + 2; if (p3reloc.type != RADEON_PACKET_TYPE3 || p3reloc.opcode != RADEON_PACKET3_NOP) { DRM_ERROR("No packet3 for relocation for packet at %d.\n", p3reloc.idx); radeon_cs_dump_packet(p, &p3reloc); return -EINVAL; } idx = radeon_get_ib_value(p, p3reloc.idx + 1); if (idx >= relocs_chunk->length_dw) { DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", idx, relocs_chunk->length_dw); radeon_cs_dump_packet(p, &p3reloc); return -EINVAL; } /* FIXME: we assume reloc size is 4 dwords */ if (nomm) { *cs_reloc = p->relocs; (*cs_reloc)->gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32; (*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0]; } else *cs_reloc = &p->relocs[(idx / 4)]; return 0; }