/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#include <errno.h>

#include "private.h"

#include "nvif/class.h"

static int
abi16_chan_nv04(struct nouveau_object *obj)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct nv04_fifo *nv04 = obj->data;
	struct drm_nouveau_channel_alloc req = {
		.fb_ctxdma_handle = nv04->vram,
		.tt_ctxdma_handle = nv04->gart
	};
	int ret;

	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_CHANNEL_ALLOC,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nv04->base.channel = req.channel;
	nv04->base.pushbuf = req.pushbuf_domains;
	nv04->notify = req.notifier_handle;
	nv04->base.object->handle = req.channel;
	nv04->base.object->length = sizeof(*nv04);
	return 0;
}

static int
abi16_chan_nvc0(struct nouveau_object *obj)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct drm_nouveau_channel_alloc req = {};
	struct nvc0_fifo *nvc0 = obj->data;
	int ret;

	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_CHANNEL_ALLOC,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvc0->base.channel = req.channel;
	nvc0->base.pushbuf = req.pushbuf_domains;
	nvc0->notify = req.notifier_handle;
	nvc0->base.object->handle = req.channel;
	nvc0->base.object->length = sizeof(*nvc0);
	return 0;
}

static int
abi16_chan_nve0(struct nouveau_object *obj)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct drm_nouveau_channel_alloc req = {};
	struct nve0_fifo *nve0 = obj->data;
	int ret;

	if (obj->length > offsetof(struct nve0_fifo, engine)) {
		req.fb_ctxdma_handle = 0xffffffff;
		req.tt_ctxdma_handle = nve0->engine;
	}

	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_CHANNEL_ALLOC,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nve0->base.channel = req.channel;
	nve0->base.pushbuf = req.pushbuf_domains;
	nve0->notify = req.notifier_handle;
	nve0->base.object->handle = req.channel;
	nve0->base.object->length = sizeof(*nve0);
	return 0;
}

static int
abi16_engobj(struct nouveau_object *obj)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct drm_nouveau_grobj_alloc req = {
		.channel = obj->parent->handle,
		.handle = obj->handle,
		.class = obj->oclass,
	};
	int ret;

	/* Older kernel versions did not have the concept of nouveau-
	 * specific classes and abused some NVIDIA-assigned ones for
	 * a SW class. The ABI16 layer has compatibility in place to
	 * translate these older identifiers to the newer ones.
	 *
	 * Clients that have been updated to use NVIF are required to
	 * use the newer class identifiers, which means that they'll
	 * break if running on an older kernel.
	 *
	 * To handle this case, when using ABI16, we translate to the
	 * older values which work on any kernel.
	 */
	switch (req.class) {
	case NVIF_CLASS_SW_NV04 : req.class = 0x006e; break;
	case NVIF_CLASS_SW_NV10 : req.class = 0x016e; break;
	case NVIF_CLASS_SW_NV50 : req.class = 0x506e; break;
	case NVIF_CLASS_SW_GF100: req.class = 0x906e; break;
	default:
		break;
	}

	ret = drmCommandWrite(drm->fd, DRM_NOUVEAU_GROBJ_ALLOC,
			      &req, sizeof(req));
	if (ret)
		return ret;

	obj->length = sizeof(struct nouveau_object *);
	return 0;
}

static int
abi16_ntfy(struct nouveau_object *obj)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct nv04_notify *ntfy = obj->data;
	struct drm_nouveau_notifierobj_alloc req = {
		.channel = obj->parent->handle,
		.handle = ntfy->object->handle,
		.size = ntfy->length,
	};
	int ret;

	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_NOTIFIEROBJ_ALLOC,
				  &req, sizeof(req));
	if (ret)
		return ret;

	ntfy->offset = req.offset;
	ntfy->object->length = sizeof(*ntfy);
	return 0;
}

drm_private int
abi16_sclass(struct nouveau_object *obj, struct nouveau_sclass **psclass)
{
	struct nouveau_sclass *sclass;
	struct nouveau_device *dev;

	if (!(sclass = calloc(8, sizeof(*sclass))))
		return -ENOMEM;
	*psclass = sclass;

	switch (obj->oclass) {
	case NOUVEAU_FIFO_CHANNEL_CLASS:
		/* Older kernel versions were exposing the wrong video engine
		 * classes on certain G98:GF100 boards. This has since been
		 * corrected, but ABI16 has compatibility in place to avoid
		 * breaking older userspace.
		 *
		 * Clients that have been updated to use NVIF are required to
		 * use the correct classes, which means that they'll break if
		 * running on an older kernel.
		 *
		 * To handle this issue, if using the older kernel interfaces,
		 * we'll magic up a list containing the vdec classes that the
		 * kernel will accept for these boards. Clients should make
		 * use of this information instead of hardcoding classes for
		 * specific chipsets.
		 */
		dev = (struct nouveau_device *)obj->parent;
		if (dev->chipset >= 0x98 &&
		    dev->chipset != 0xa0 &&
		    dev->chipset < 0xc0) {
			*sclass++ = (struct nouveau_sclass){
				GT212_MSVLD, -1, -1
			};
			*sclass++ = (struct nouveau_sclass){
				GT212_MSPDEC, -1, -1
			};
			*sclass++ = (struct nouveau_sclass){
				GT212_MSPPP, -1, -1
			};
		}
		break;
	default:
		break;
	}

	return sclass - *psclass;
}

drm_private void
abi16_delete(struct nouveau_object *obj)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	if (obj->oclass == NOUVEAU_FIFO_CHANNEL_CLASS) {
		struct drm_nouveau_channel_free req;
		req.channel = obj->handle;
		drmCommandWrite(drm->fd, DRM_NOUVEAU_CHANNEL_FREE,
				&req, sizeof(req));
	} else {
		struct drm_nouveau_gpuobj_free req;
		req.channel = obj->parent->handle;
		req.handle = obj->handle;
		drmCommandWrite(drm->fd, DRM_NOUVEAU_GPUOBJ_FREE,
				&req, sizeof(req));
	}
}

drm_private bool
abi16_object(struct nouveau_object *obj, int (**func)(struct nouveau_object *))
{
	struct nouveau_object *parent = obj->parent;

	/* nouveau_object::length is (ab)used to determine whether the
	 * object is a legacy object (!=0), or a real NVIF object.
	 */
	if ((parent->length != 0 && parent->oclass == NOUVEAU_DEVICE_CLASS) ||
	    (parent->length == 0 && parent->oclass == NV_DEVICE)) {
		if (obj->oclass == NOUVEAU_FIFO_CHANNEL_CLASS) {
			struct nouveau_device *dev = (void *)parent;
			if (dev->chipset < 0xc0)
				*func = abi16_chan_nv04;
			else
			if (dev->chipset < 0xe0)
				*func = abi16_chan_nvc0;
			else
				*func = abi16_chan_nve0;
			return true;
		}
	} else
	if ((parent->length != 0 &&
	     parent->oclass == NOUVEAU_FIFO_CHANNEL_CLASS)) {
		if (obj->oclass == NOUVEAU_NOTIFIER_CLASS) {
			*func = abi16_ntfy;
			return true;
		}

		*func = abi16_engobj;
		return false; /* try NVIF, if supported, before calling func */
	}

	*func = NULL;
	return false;
}

drm_private void
abi16_bo_info(struct nouveau_bo *bo, struct drm_nouveau_gem_info *info)
{
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

	nvbo->map_handle = info->map_handle;
	bo->handle = info->handle;
	bo->size = info->size;
	bo->offset = info->offset;

	bo->flags = 0;
	if (info->domain & NOUVEAU_GEM_DOMAIN_VRAM)
		bo->flags |= NOUVEAU_BO_VRAM;
	if (info->domain & NOUVEAU_GEM_DOMAIN_GART)
		bo->flags |= NOUVEAU_BO_GART;
	if (!(info->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG))
		bo->flags |= NOUVEAU_BO_CONTIG;
	if (nvbo->map_handle)
		bo->flags |= NOUVEAU_BO_MAP;

	if (bo->device->chipset >= 0xc0) {
		bo->config.nvc0.memtype = (info->tile_flags & 0xff00) >> 8;
		bo->config.nvc0.tile_mode = info->tile_mode;
	} else
	if (bo->device->chipset >= 0x80 || bo->device->chipset == 0x50) {
		bo->config.nv50.memtype = (info->tile_flags & 0x07f00) >> 8 |
					  (info->tile_flags & 0x30000) >> 9;
		bo->config.nv50.tile_mode = info->tile_mode << 4;
	} else {
		bo->config.nv04.surf_flags = info->tile_flags & 7;
		bo->config.nv04.surf_pitch = info->tile_mode;
	}
}

drm_private int
abi16_bo_init(struct nouveau_bo *bo, uint32_t alignment,
	      union nouveau_bo_config *config)
{
	struct nouveau_device *dev = bo->device;
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct drm_nouveau_gem_new req = {};
	struct drm_nouveau_gem_info *info = &req.info;
	int ret;

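	/* Translate the requested nouveau_bo flags into GEM placement
	 * domains; if the caller asked for neither VRAM nor GART, let
	 * the kernel place the buffer in either.
	 */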
	if (bo->flags & NOUVEAU_BO_VRAM)
		info->domain |= NOUVEAU_GEM_DOMAIN_VRAM;
	if (bo->flags & NOUVEAU_BO_GART)
		info->domain |= NOUVEAU_GEM_DOMAIN_GART;
	if (!info->domain)
		info->domain |= NOUVEAU_GEM_DOMAIN_VRAM |
				NOUVEAU_GEM_DOMAIN_GART;

	if (bo->flags & NOUVEAU_BO_MAP)
		info->domain |= NOUVEAU_GEM_DOMAIN_MAPPABLE;

	if (bo->flags & NOUVEAU_BO_COHERENT)
		info->domain |= NOUVEAU_GEM_DOMAIN_COHERENT;

	if (!(bo->flags & NOUVEAU_BO_CONTIG))
		info->tile_flags = NOUVEAU_GEM_TILE_NONCONTIG;

	info->size = bo->size;
	req.align = alignment;

	if (config) {
		if (dev->chipset >= 0xc0) {
			info->tile_flags = (config->nvc0.memtype & 0xff) << 8;
			info->tile_mode = config->nvc0.tile_mode;
		} else
		if (dev->chipset >= 0x80 || dev->chipset == 0x50) {
			info->tile_flags = (config->nv50.memtype & 0x07f) << 8 |
					   (config->nv50.memtype & 0x180) << 9;
			info->tile_mode = config->nv50.tile_mode >> 4;
		} else {
			info->tile_flags = config->nv04.surf_flags & 7;
			info->tile_mode = config->nv04.surf_pitch;
		}
	}

	if (!nouveau_device(dev)->have_bo_usage)
		info->tile_flags &= 0x0000ff00;

	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_NEW,
				  &req, sizeof(req));
	if (ret == 0)
		abi16_bo_info(bo, &req.info);
	return ret;
}