Lines Matching full:fb — search hits over the NVKM framebuffer subdevice core (the nvkm_fb_* functions, apparently nouveau's nvkm/subdev/fb/base.c). Each hit gives the source line number, the matched line, and the enclosing function; the trailing "argument" and "local" tags mark hits where fb is a function parameter or a local variable declaration.
35 nvkm_fb_tile_fini(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile) in nvkm_fb_tile_fini() argument
37 fb->func->tile.fini(fb, region, tile); in nvkm_fb_tile_fini()
41 nvkm_fb_tile_init(struct nvkm_fb *fb, int region, u32 addr, u32 size, in nvkm_fb_tile_init() argument
44 fb->func->tile.init(fb, region, addr, size, pitch, flags, tile); in nvkm_fb_tile_init()
48 nvkm_fb_tile_prog(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile) in nvkm_fb_tile_prog() argument
50 struct nvkm_device *device = fb->subdev.device; in nvkm_fb_tile_prog()
51 if (fb->func->tile.prog) { in nvkm_fb_tile_prog()
52 fb->func->tile.prog(fb, region, tile); in nvkm_fb_tile_prog()
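
All three tile wrappers above share one dispatch shape: struct nvkm_fb carries a const per-chipset vtable in fb->func, mandatory hooks (tile.fini, tile.init) are called unconditionally, and optional hooks (tile.prog) are NULL-checked first; the same guard recurs at the sysmem.flush_page_init, intr, and vidmem.size hits below. A minimal standalone model of the pattern — the fb_func/fb_tile_* names here are invented for illustration, not nouveau's:

    #include <stdio.h>

    struct fb;                                  /* forward declaration */

    struct fb_func {                            /* per-chipset vtable, like nvkm_fb_func */
        void (*tile_fini)(struct fb *, int region);
        void (*tile_prog)(struct fb *, int region);   /* optional: may be NULL */
    };

    struct fb {
        const struct fb_func *func;
    };

    static void fb_tile_fini(struct fb *fb, int region)
    {
        fb->func->tile_fini(fb, region);        /* mandatory hook: no guard */
    }

    static void fb_tile_prog(struct fb *fb, int region)
    {
        if (fb->func->tile_prog)                /* optional hook: NULL-checked */
            fb->func->tile_prog(fb, region);
    }

    static void chip_tile_fini(struct fb *fb, int region)
    {
        (void)fb;
        printf("chip: fini tile region %d\n", region);
    }

    int main(void)
    {
        static const struct fb_func chip = {
            .tile_fini = chip_tile_fini,
            .tile_prog = NULL,                  /* this chipset lacks a prog hook */
        };
        struct fb fb = { .func = &chip };

        fb_tile_fini(&fb, 0);                   /* dispatches to chip_tile_fini() */
        fb_tile_prog(&fb, 0);                   /* no-op: hook absent */
        return 0;
    }
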
63 struct nvkm_fb *fb = device->fb; in nvkm_fb_sysmem_flush_page_init() local
65 if (fb->func->sysmem.flush_page_init) in nvkm_fb_sysmem_flush_page_init()
66 fb->func->sysmem.flush_page_init(fb); in nvkm_fb_sysmem_flush_page_init()
100 struct nvkm_fb *fb = nvkm_fb(subdev); in nvkm_fb_intr() local
101 if (fb->func->intr) in nvkm_fb_intr()
102 fb->func->intr(fb); in nvkm_fb_intr()
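
nvkm_fb(subdev) in the intr and oneinit hits recovers the outer nvkm_fb from its embedded nvkm_subdev; this is the kernel's container_of() idiom, which subtracts the member's offset from the member's address. A self-contained userspace model (struct names are illustrative, not nouveau's):

    #include <stddef.h>
    #include <stdio.h>

    /* userspace stand-in for the kernel's container_of() */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct subdev {
        const char *name;
    };

    struct fb {
        struct subdev subdev;                   /* embedded base object */
        int page;
    };

    /* like the nvkm_fb() accessor: map an embedded subdev back to its fb */
    static struct fb *to_fb(struct subdev *subdev)
    {
        return container_of(subdev, struct fb, subdev);
    }

    int main(void)
    {
        struct fb fb = { .subdev = { .name = "fb" }, .page = 17 };
        struct subdev *subdev = &fb.subdev;     /* callbacks receive only this */

        printf("page = %d\n", to_fb(subdev)->page);   /* prints 17 */
        return 0;
    }
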
108 struct nvkm_fb *fb = nvkm_fb(subdev); in nvkm_fb_oneinit() local
111 if (fb->func->ram_new) { in nvkm_fb_oneinit()
112 int ret = fb->func->ram_new(fb, &fb->ram); in nvkm_fb_oneinit()
119 if (fb->func->oneinit) { in nvkm_fb_oneinit()
120 int ret = fb->func->oneinit(fb); in nvkm_fb_oneinit()
129 if (fb->func->tags) { in nvkm_fb_oneinit()
130 tags = fb->func->tags(fb); in nvkm_fb_oneinit()
134 return nvkm_mm_init(&fb->tags.mm, 0, 0, tags, 1); in nvkm_fb_oneinit()
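
The oneinit hits show the usual one-time-setup ladder: each optional stage (ram_new, the chipset's own oneinit, the comptag allocator set up by nvkm_mm_init) runs only if its hook exists and propagates a failure immediately, so later stages never see half-built state. A hedged standalone sketch of that fail-fast shape, with invented stage names:

    #include <stdio.h>

    struct fb;

    struct fb_func {
        int (*ram_new)(struct fb *);            /* each stage is optional ... */
        int (*oneinit)(struct fb *);            /* ... and may fail with -errno */
    };

    struct fb {
        const struct fb_func *func;
    };

    static int fb_oneinit(struct fb *fb)
    {
        if (fb->func->ram_new) {
            int ret = fb->func->ram_new(fb);
            if (ret)
                return ret;                     /* fail fast: later stages never run */
        }

        if (fb->func->oneinit) {
            int ret = fb->func->oneinit(fb);
            if (ret)
                return ret;
        }

        return 0;                               /* every present stage succeeded */
    }

    static int chip_ram_new(struct fb *fb)
    {
        (void)fb;
        return 0;                               /* pretend RAM detection worked */
    }

    int main(void)
    {
        static const struct fb_func chip = { .ram_new = chip_ram_new };
        struct fb fb = { .func = &chip };

        printf("oneinit -> %d\n", fb_oneinit(&fb));   /* prints 0 */
        return 0;
    }
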
138 nvkm_fb_mem_unlock(struct nvkm_fb *fb) in nvkm_fb_mem_unlock() argument
140 struct nvkm_subdev *subdev = &fb->subdev; in nvkm_fb_mem_unlock()
143 if (!fb->func->vpr.scrub_required) in nvkm_fb_mem_unlock()
150 if (!fb->func->vpr.scrub_required(fb)) { in nvkm_fb_mem_unlock()
157 if (!fb->vpr_scrubber.fw.img) { in nvkm_fb_mem_unlock()
162 ret = fb->func->vpr.scrub(fb); in nvkm_fb_mem_unlock()
168 if (fb->func->vpr.scrub_required(fb)) { in nvkm_fb_mem_unlock()
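
The mem_unlock hits trace a check/act/verify sequence around VPR (video protected region) scrubbing: return early when no scrub is required, fail if the scrubber firmware was never loaded, run the scrub, then re-query scrub_required to confirm the hardware really unlocked. A condensed standalone model of that control flow — the error codes fit the pattern but are not necessarily the exact nouveau source:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct fb {
        bool vpr_locked;                        /* stand-in for the hardware VPR state */
        bool have_fw;                           /* stand-in for vpr_scrubber.fw.img */
    };

    static bool scrub_required(struct fb *fb)  { return fb->vpr_locked; }
    static int  scrub(struct fb *fb)           { fb->vpr_locked = false; return 0; }

    static int fb_mem_unlock(struct fb *fb)
    {
        int ret;

        if (!scrub_required(fb))                /* check: nothing to do */
            return 0;

        if (!fb->have_fw)                       /* scrubber firmware never loaded */
            return -EINVAL;

        ret = scrub(fb);                        /* act */
        if (ret)
            return ret;

        if (scrub_required(fb))                 /* verify: re-read hardware state */
            return -EIO;

        return 0;
    }

    int main(void)
    {
        struct fb fb = { .vpr_locked = true, .have_fw = true };

        printf("unlock -> %d\n", fb_mem_unlock(&fb));   /* prints 0 */
        return 0;
    }
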
180 struct nvkm_fb *fb = device->fb; in nvkm_fb_vidmem_size() local
182 if (fb && fb->func->vidmem.size) in nvkm_fb_vidmem_size()
183 return fb->func->vidmem.size(fb); in nvkm_fb_vidmem_size()
192 struct nvkm_fb *fb = nvkm_fb(subdev); in nvkm_fb_init() local
195 if (fb->ram) { in nvkm_fb_init()
196 ret = nvkm_ram_init(fb->ram); in nvkm_fb_init()
201 for (i = 0; i < fb->tile.regions; i++) in nvkm_fb_init()
202 fb->func->tile.prog(fb, i, &fb->tile.region[i]); in nvkm_fb_init()
206 if (fb->func->init) in nvkm_fb_init()
207 fb->func->init(fb); in nvkm_fb_init()
209 if (fb->func->init_remapper) in nvkm_fb_init()
210 fb->func->init_remapper(fb); in nvkm_fb_init()
212 if (fb->func->init_page) { in nvkm_fb_init()
213 ret = fb->func->init_page(fb); in nvkm_fb_init()
218 if (fb->func->init_unkn) in nvkm_fb_init()
219 fb->func->init_unkn(fb); in nvkm_fb_init()
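
Note the loop at lines 201-202: the driver treats fb->tile.region[] as the authoritative copy of the tiling setup and replays every region into the hardware via tile.prog on each init, so the configuration can be restored after a reset (plausibly also across resume; that part is my inference, not stated in these hits). A minimal model of replaying cached state, with illustrative names:

    #include <stdio.h>

    #define MAX_REGIONS 4

    struct tile { unsigned addr, size; };

    struct fb {
        int regions;                            /* like fb->tile.regions */
        struct tile region[MAX_REGIONS];        /* software copy of hardware state */
    };

    /* stand-in for fb->func->tile.prog: pushes one region into the hardware */
    static void tile_prog(struct fb *fb, int i)
    {
        printf("prog region %d: addr=0x%x size=0x%x\n",
               i, fb->region[i].addr, fb->region[i].size);
    }

    /* on every init, replay the whole cached configuration */
    static void fb_init(struct fb *fb)
    {
        for (int i = 0; i < fb->regions; i++)
            tile_prog(fb, i);
    }

    int main(void)
    {
        struct fb fb = {
            .regions = 2,
            .region  = { { 0x0000, 0x10000 }, { 0x10000, 0x8000 } },
        };

        fb_init(&fb);                           /* software copy is authoritative */
        return 0;
    }
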
234 struct nvkm_fb *fb = nvkm_fb(subdev); in nvkm_fb_dtor() local
237 nvkm_memory_unref(&fb->mmu_wr); in nvkm_fb_dtor()
238 nvkm_memory_unref(&fb->mmu_rd); in nvkm_fb_dtor()
240 for (i = 0; i < fb->tile.regions; i++) in nvkm_fb_dtor()
241 fb->func->tile.fini(fb, i, &fb->tile.region[i]); in nvkm_fb_dtor()
243 nvkm_mm_fini(&fb->tags.mm); in nvkm_fb_dtor()
244 mutex_destroy(&fb->tags.mutex); in nvkm_fb_dtor()
246 nvkm_ram_del(&fb->ram); in nvkm_fb_dtor()
248 nvkm_falcon_fw_dtor(&fb->vpr_scrubber); in nvkm_fb_dtor()
250 if (fb->sysmem.flush_page) { in nvkm_fb_dtor()
251 dma_unmap_page(subdev->device->dev, fb->sysmem.flush_page_addr, in nvkm_fb_dtor()
253 __free_page(fb->sysmem.flush_page); in nvkm_fb_dtor()
256 if (fb->func->dtor) in nvkm_fb_dtor()
257 return fb->func->dtor(fb); in nvkm_fb_dtor()
259 return fb; in nvkm_fb_dtor()
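
The dtor's tail returns a pointer instead of freeing anything: in nvkm the destructor hands the allocation back for the core to free, and a chipset dtor hook may return its own containing object when nvkm_fb sits at a nonzero offset inside a larger chip-specific struct. That reading is inferred from the return fb / fb->func->dtor(fb) pair above; here is a self-contained model of the convention with invented type names:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct fb;

    struct fb_func {
        void *(*dtor)(struct fb *);             /* optional: returns what to free */
    };

    struct fb {
        const struct fb_func *func;
    };

    struct chip_fb {
        int chip_state;                         /* chip data placed before the base, */
        struct fb base;                         /* so &base != start of allocation  */
    };

    /* chipset hook: map the embedded base back to the real allocation */
    static void *chip_dtor(struct fb *fb)
    {
        struct chip_fb *chip = (struct chip_fb *)
            ((char *)fb - offsetof(struct chip_fb, base));

        printf("tearing down chip state %d\n", chip->chip_state);
        return chip;                            /* caller frees this, not fb */
    }

    /* base dtor: chain to the hook if present, else hand back the base itself */
    static void *fb_dtor(struct fb *fb)
    {
        if (fb->func->dtor)
            return fb->func->dtor(fb);
        return fb;
    }

    int main(void)
    {
        static const struct fb_func chip_funcs = { .dtor = chip_dtor };
        struct chip_fb *chip = malloc(sizeof(*chip));

        if (!chip)
            return 1;
        chip->chip_state = 42;
        chip->base.func = &chip_funcs;

        free(fb_dtor(&chip->base));             /* frees the whole chip_fb */
        return 0;
    }
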
273 enum nvkm_subdev_type type, int inst, struct nvkm_fb *fb) in nvkm_fb_ctor() argument
275 nvkm_subdev_ctor(&nvkm_fb, device, type, inst, &fb->subdev); in nvkm_fb_ctor()
276 fb->func = func; in nvkm_fb_ctor()
277 fb->tile.regions = fb->func->tile.regions; in nvkm_fb_ctor()
278 fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage", fb->func->default_bigpage); in nvkm_fb_ctor()
279 mutex_init(&fb->tags.mutex); in nvkm_fb_ctor()
282 fb->sysmem.flush_page = alloc_page(GFP_KERNEL | __GFP_ZERO); in nvkm_fb_ctor()
283 if (!fb->sysmem.flush_page) in nvkm_fb_ctor()
286 fb->sysmem.flush_page_addr = dma_map_page(device->dev, fb->sysmem.flush_page, in nvkm_fb_ctor()
288 if (dma_mapping_error(device->dev, fb->sysmem.flush_page_addr)) in nvkm_fb_ctor()
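
The ctor's tail is the standard streaming-DMA page setup that the dtor hits at lines 250-253 mirror: allocate a zeroed page, map it with dma_map_page(), and validate the handle with dma_mapping_error() before first use; teardown must dma_unmap_page() before __free_page(). A distilled kernel-style sketch of the pairing — the DMA API calls are real, but the helper names are mine, the PAGE_SIZE/DMA_BIDIRECTIONAL arguments are assumptions (the hits are truncated before them), and this fragment only builds inside a kernel tree:

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    /* setup, as at lines 282-288: allocate, map, validate the handle */
    static int flush_page_setup(struct device *dev, struct page **page,
                                dma_addr_t *addr)
    {
        *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!*page)
            return -ENOMEM;

        *addr = dma_map_page(dev, *page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, *addr)) {    /* never trust an unchecked handle */
            __free_page(*page);
            *page = NULL;
            return -EFAULT;
        }
        return 0;
    }

    /* teardown, as at lines 250-253: unmap before freeing the page */
    static void flush_page_teardown(struct device *dev, struct page *page,
                                    dma_addr_t addr)
    {
        if (page) {
            dma_unmap_page(dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
            __free_page(page);
        }
    }
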