xref: /linux/drivers/gpu/drm/radeon/radeon_device.c (revision aec2f682d47c54ef434b2d440992626d80b1ebdc)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 
29 #include <linux/efi.h>
30 #include <linux/pci.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/slab.h>
33 #include <linux/vga_switcheroo.h>
34 #include <linux/vgaarb.h>
35 
36 #include <drm/drm_cache.h>
37 #include <drm/drm_client_event.h>
38 #include <drm/drm_crtc_helper.h>
39 #include <drm/drm_device.h>
40 #include <drm/drm_fb_helper.h>
41 #include <drm/drm_file.h>
42 #include <drm/drm_framebuffer.h>
43 #include <drm/drm_probe_helper.h>
44 #include <drm/radeon_drm.h>
45 
46 #include "radeon_device.h"
47 #include "radeon_reg.h"
48 #include "radeon.h"
49 #include "atom.h"
50 
/*
 * Human-readable chip names.  Presumably indexed by enum radeon_family
 * (keep the order in sync with that enum — TODO confirm); "LAST" is the
 * final sentinel entry.
 */
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"LAST",
};
116 
#if defined(CONFIG_VGA_SWITCHEROO)
/* ATPX capability queries, provided elsewhere when vga_switcheroo is built. */
bool radeon_has_atpx_dgpu_power_cntl(void);
bool radeon_is_atpx_hybrid(void);
#else
/* Without vga_switcheroo there is no ATPX; report no PX capabilities. */
static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool radeon_is_atpx_hybrid(void) { return false; }
#endif
124 
/* Quirk flag: force PX (PowerXpress hybrid graphics) off for this device. */
#define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)

/* PCI vendor/device/subsystem match, plus the quirk flags to apply on match. */
struct radeon_px_quirk {
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
	u32 px_quirk_flags;
};
134 
/* Known-broken PX systems; matched against the PCI IDs of the dGPU. */
static struct radeon_px_quirk radeon_px_quirk_list[] = {
	/* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
	 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
	 */
	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
	 */
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
	 */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
	 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
	 */
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K73TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381#c52
	 */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX },
	{ 0, 0, 0, 0, 0 },	/* list terminator (chip_device == 0) */
};
158 
159 bool radeon_is_px(struct drm_device *dev)
160 {
161 	struct radeon_device *rdev = dev->dev_private;
162 
163 	if (rdev->flags & RADEON_IS_PX)
164 		return true;
165 	return false;
166 }
167 
/* Apply any PX quirk matching this PCI device, and clear the PX flag when
 * ATPX cannot actually power-control the dGPU or run in hybrid mode. */
static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
{
	struct radeon_px_quirk *p = radeon_px_quirk_list;

	/* Apply PX quirks */
	while (p && p->chip_device != 0) {
		if (rdev->pdev->vendor == p->chip_vendor &&
		    rdev->pdev->device == p->chip_device &&
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
		    rdev->pdev->subsystem_device == p->subsys_device) {
			rdev->px_quirk_flags = p->px_quirk_flags;
			break;
		}
		++p;
	}

	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
		rdev->flags &= ~RADEON_IS_PX;

	/* disable PX if the system doesn't support dGPU power control or hybrid gfx */
	if (!radeon_is_atpx_hybrid() &&
	    !radeon_has_atpx_dgpu_power_cntl())
		rdev->flags &= ~RADEON_IS_PX;
}
192 
193 /**
194  * radeon_program_register_sequence - program an array of registers.
195  *
196  * @rdev: radeon_device pointer
197  * @registers: pointer to the register array
198  * @array_size: size of the register array
199  *
200  * Programs an array or registers with and and or masks.
201  * This is a helper for setting golden registers.
202  */
203 void radeon_program_register_sequence(struct radeon_device *rdev,
204 				      const u32 *registers,
205 				      const u32 array_size)
206 {
207 	u32 tmp, reg, and_mask, or_mask;
208 	int i;
209 
210 	if (array_size % 3)
211 		return;
212 
213 	for (i = 0; i < array_size; i +=3) {
214 		reg = registers[i + 0];
215 		and_mask = registers[i + 1];
216 		or_mask = registers[i + 2];
217 
218 		if (and_mask == 0xffffffff) {
219 			tmp = or_mask;
220 		} else {
221 			tmp = RREG32(reg);
222 			tmp &= ~and_mask;
223 			tmp |= or_mask;
224 		}
225 		WREG32(reg, tmp);
226 	}
227 }
228 
/* Reset the ASIC by writing the reset magic to PCI config space offset 0x7c. */
void radeon_pci_config_reset(struct radeon_device *rdev)
{
	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
}
233 
234 /**
235  * radeon_surface_init - Clear GPU surface registers.
236  *
237  * @rdev: radeon_device pointer
238  *
239  * Clear GPU surface registers (r1xx-r5xx).
240  */
241 void radeon_surface_init(struct radeon_device *rdev)
242 {
243 	/* FIXME: check this out */
244 	if (rdev->family < CHIP_R600) {
245 		int i;
246 
247 		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
248 			if (rdev->surface_regs[i].bo)
249 				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
250 			else
251 				radeon_clear_surface_reg(rdev, i);
252 		}
253 		/* enable surfaces */
254 		WREG32(RADEON_SURFACE_CNTL, 0);
255 	}
256 }
257 
258 /*
259  * GPU scratch registers helpers function.
260  */
261 /**
262  * radeon_scratch_init - Init scratch register driver information.
263  *
264  * @rdev: radeon_device pointer
265  *
266  * Init CP scratch register driver information (r1xx-r5xx)
267  */
268 void radeon_scratch_init(struct radeon_device *rdev)
269 {
270 	int i;
271 
272 	/* FIXME: check this out */
273 	if (rdev->family < CHIP_R300) {
274 		rdev->scratch.num_reg = 5;
275 	} else {
276 		rdev->scratch.num_reg = 7;
277 	}
278 	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
279 	for (i = 0; i < rdev->scratch.num_reg; i++) {
280 		rdev->scratch.free[i] = true;
281 		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
282 	}
283 }
284 
285 /**
286  * radeon_scratch_get - Allocate a scratch register
287  *
288  * @rdev: radeon_device pointer
289  * @reg: scratch register mmio offset
290  *
291  * Allocate a CP scratch register for use by the driver (all asics).
292  * Returns 0 on success or -EINVAL on failure.
293  */
294 int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
295 {
296 	int i;
297 
298 	for (i = 0; i < rdev->scratch.num_reg; i++) {
299 		if (rdev->scratch.free[i]) {
300 			rdev->scratch.free[i] = false;
301 			*reg = rdev->scratch.reg[i];
302 			return 0;
303 		}
304 	}
305 	return -EINVAL;
306 }
307 
308 /**
309  * radeon_scratch_free - Free a scratch register
310  *
311  * @rdev: radeon_device pointer
312  * @reg: scratch register mmio offset
313  *
314  * Free a CP scratch register allocated for use by the driver (all asics)
315  */
316 void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
317 {
318 	int i;
319 
320 	for (i = 0; i < rdev->scratch.num_reg; i++) {
321 		if (rdev->scratch.reg[i] == reg) {
322 			rdev->scratch.free[i] = true;
323 			return;
324 		}
325 	}
326 }
327 
328 /*
329  * GPU doorbell aperture helpers function.
330  */
331 /**
332  * radeon_doorbell_init - Init doorbell driver information.
333  *
334  * @rdev: radeon_device pointer
335  *
336  * Init doorbell driver information (CIK)
337  * Returns 0 on success, error on failure.
338  */
339 static int radeon_doorbell_init(struct radeon_device *rdev)
340 {
341 	/* doorbell bar mapping */
342 	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
343 	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
344 
345 	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
346 	if (rdev->doorbell.num_doorbells == 0)
347 		return -EINVAL;
348 
349 	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
350 	if (rdev->doorbell.ptr == NULL) {
351 		return -ENOMEM;
352 	}
353 	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
354 	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
355 
356 	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
357 
358 	return 0;
359 }
360 
/**
 * radeon_doorbell_fini - Tear down doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
	/* unmap the doorbell aperture and drop the stale pointer */
	iounmap(rdev->doorbell.ptr);
	rdev->doorbell.ptr = NULL;
}
373 
374 /**
375  * radeon_doorbell_get - Allocate a doorbell entry
376  *
377  * @rdev: radeon_device pointer
378  * @doorbell: doorbell index
379  *
380  * Allocate a doorbell for use by the driver (all asics).
381  * Returns 0 on success or -EINVAL on failure.
382  */
383 int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
384 {
385 	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
386 	if (offset < rdev->doorbell.num_doorbells) {
387 		__set_bit(offset, rdev->doorbell.used);
388 		*doorbell = offset;
389 		return 0;
390 	} else {
391 		return -EINVAL;
392 	}
393 }
394 
395 /**
396  * radeon_doorbell_free - Free a doorbell entry
397  *
398  * @rdev: radeon_device pointer
399  * @doorbell: doorbell index
400  *
401  * Free a doorbell allocated for use by the driver (all asics)
402  */
403 void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
404 {
405 	if (doorbell < rdev->doorbell.num_doorbells)
406 		__clear_bit(doorbell, rdev->doorbell.used);
407 }
408 
409 /*
410  * radeon_wb_*()
411  * Writeback is the method by which the GPU updates special pages
412  * in memory with the status of certain GPU events (fences, ring pointers,
413  * etc.).
414  */
415 
/**
 * radeon_wb_disable - Disable Writeback
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback (all asics).  Used for suspend.
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	/* only flips the driver-side flag; the wb buffer itself stays allocated */
	rdev->wb.enabled = false;
}
427 
428 /**
429  * radeon_wb_fini - Disable Writeback and free memory
430  *
431  * @rdev: radeon_device pointer
432  *
433  * Disables Writeback and frees the Writeback memory (all asics).
434  * Used at driver shutdown.
435  */
436 void radeon_wb_fini(struct radeon_device *rdev)
437 {
438 	radeon_wb_disable(rdev);
439 	if (rdev->wb.wb_obj) {
440 		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
441 			radeon_bo_kunmap(rdev->wb.wb_obj);
442 			radeon_bo_unpin(rdev->wb.wb_obj);
443 			radeon_bo_unreserve(rdev->wb.wb_obj);
444 		}
445 		radeon_bo_unref(&rdev->wb.wb_obj);
446 		rdev->wb.wb = NULL;
447 		rdev->wb.wb_obj = NULL;
448 	}
449 }
450 
/**
 * radeon_wb_init - Init Writeback driver info and allocate memory
 *
 * @rdev: radeon_device pointer
 *
 * Initializes writeback and allocates the writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		/* one GPU page of GTT-backed memory holds all writeback slots */
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				&rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI, APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
526 
/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at base address provided
 * as parameter (which is so far either PCI aperture address or
 * for IGP TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32bits
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail, worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note 1: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note 2: we use mc_vram_size as on some board we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * Novell bug 204882 + along with lots of ubuntu ones
 *
 * Note 3: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in case where real_vram_size is inferior to mc_vram_size (ie
 * not affected by bogus hw of Novell bug 204882 + along with lots of ubuntu
 * ones)
 *
 * Note 4: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size, align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		/* VRAM doesn't fit in the MC address space above base */
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		/* VRAM range would overlap the AGP GTT window */
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	/* recompute the end in case the size was clamped above */
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}
591 
/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	/* aligned space available after (af) and before (bf) the VRAM range */
	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
627 
628 /*
629  * GPU helpers function.
630  */
631 
/*
 * radeon_device_is_virtual - check if we are running in a virtual environment
 *
 * Check if the asic has been passed through to a VM (all asics).
 * Used at driver startup.
 * Returns true if virtual or false if not.
 */
bool radeon_device_is_virtual(void)
{
#ifdef CONFIG_X86
	/* set by the hypervisor in CPUID when running as a guest */
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#else
	/* no portable detection on non-x86; assume bare metal */
	return false;
#endif
}
648 /**
649  * radeon_card_posted - check if the hw has already been initialized
650  *
651  * @rdev: radeon_device pointer
652  *
653  * Check if the asic has been initialized (all asics).
654  * Used at driver startup.
655  * Returns true if initialized or false if not.
656  */
657 bool radeon_card_posted(struct radeon_device *rdev)
658 {
659 	uint32_t reg;
660 
661 	/* for pass through, always force asic_init for CI */
662 	if (rdev->family >= CHIP_BONAIRE &&
663 	    radeon_device_is_virtual())
664 		return false;
665 
666 	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
667 	if (efi_enabled(EFI_BOOT) &&
668 	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
669 	    (rdev->family < CHIP_R600))
670 		return false;
671 
672 	if (ASIC_IS_NODCE(rdev))
673 		goto check_memsize;
674 
675 	/* first check CRTCs */
676 	if (ASIC_IS_DCE4(rdev)) {
677 		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
678 			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
679 			if (rdev->num_crtc >= 4) {
680 				reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
681 					RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
682 			}
683 			if (rdev->num_crtc >= 6) {
684 				reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
685 					RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
686 			}
687 		if (reg & EVERGREEN_CRTC_MASTER_EN)
688 			return true;
689 	} else if (ASIC_IS_AVIVO(rdev)) {
690 		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
691 		      RREG32(AVIVO_D2CRTC_CONTROL);
692 		if (reg & AVIVO_CRTC_EN) {
693 			return true;
694 		}
695 	} else {
696 		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
697 		      RREG32(RADEON_CRTC2_GEN_CNTL);
698 		if (reg & RADEON_CRTC_EN) {
699 			return true;
700 		}
701 	}
702 
703 check_memsize:
704 	/* then check MEM_SIZE, in case the crtcs are off */
705 	if (rdev->family >= CHIP_R600)
706 		reg = RREG32(R600_CONFIG_MEMSIZE);
707 	else
708 		reg = RREG32(RADEON_CONFIG_MEMSIZE);
709 
710 	if (reg)
711 		return true;
712 
713 	return false;
714 
715 }
716 
717 /**
718  * radeon_update_bandwidth_info - update display bandwidth params
719  *
720  * @rdev: radeon_device pointer
721  *
722  * Used when sclk/mclk are switched or display modes are set.
723  * params are used to calculate display watermarks (all asics)
724  */
725 void radeon_update_bandwidth_info(struct radeon_device *rdev)
726 {
727 	fixed20_12 a;
728 	u32 sclk = rdev->pm.current_sclk;
729 	u32 mclk = rdev->pm.current_mclk;
730 
731 	/* sclk/mclk in Mhz */
732 	a.full = dfixed_const(100);
733 	rdev->pm.sclk.full = dfixed_const(sclk);
734 	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
735 	rdev->pm.mclk.full = dfixed_const(mclk);
736 	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
737 
738 	if (rdev->flags & RADEON_IS_IGP) {
739 		a.full = dfixed_const(16);
740 		/* core_bandwidth = sclk(Mhz) * 16 */
741 		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
742 	}
743 }
744 
745 /**
746  * radeon_boot_test_post_card - check and possibly initialize the hw
747  *
748  * @rdev: radeon_device pointer
749  *
750  * Check if the asic is initialized and if not, attempt to initialize
751  * it (all asics).
752  * Returns true if initialized or false if not.
753  */
754 bool radeon_boot_test_post_card(struct radeon_device *rdev)
755 {
756 	if (radeon_card_posted(rdev))
757 		return true;
758 
759 	if (rdev->bios) {
760 		DRM_INFO("GPU not posted. posting now...\n");
761 		if (rdev->is_atom_bios)
762 			atom_asic_init(rdev->mode_info.atom_context);
763 		else
764 			radeon_combios_asic_init(rdev_to_drm(rdev));
765 		return true;
766 	} else {
767 		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
768 		return false;
769 	}
770 }
771 
/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	/* already allocated, nothing to do */
	if (rdev->dummy_page.page)
		return 0;
	/* GFP_DMA32: allocate below 4GB; __GFP_ZERO: start with a clean page */
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = dma_map_page(&rdev->pdev->dev, rdev->dummy_page.page,
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&rdev->pdev->dev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	/* precompute the GART entry so fills don't redo the encoding */
	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
							    RADEON_GART_PAGE_DUMMY);
	return 0;
}
801 
/**
 * radeon_dummy_page_fini - free dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	/* nothing allocated (or already freed) */
	if (rdev->dummy_page.page == NULL)
		return;
	dma_unmap_page(&rdev->pdev->dev, rdev->dummy_page.addr, PAGE_SIZE,
		       DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}
818 
819 
820 /* ATOM accessor methods */
821 /*
822  * ATOM is an interpreted byte code stored in tables in the vbios.  The
823  * driver registers callbacks to access registers and the interpreter
824  * in the driver parses the tables and executes then to program specific
825  * actions (set display modes, asic init, etc.).  See radeon_atombios.c,
826  * atombios.h, and atom.c
827  */
828 
829 /**
830  * cail_pll_read - read PLL register
831  *
832  * @info: atom card_info pointer
833  * @reg: PLL register offset
834  *
835  * Provides a PLL register accessor for the atom interpreter (r4xx+).
836  * Returns the value of the PLL register.
837  */
838 static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
839 {
840 	struct radeon_device *rdev = info->dev->dev_private;
841 	uint32_t r;
842 
843 	r = rdev->pll_rreg(rdev, reg);
844 	return r;
845 }
846 
/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the PLL register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}
862 
863 /**
864  * cail_mc_read - read MC (Memory Controller) register
865  *
866  * @info: atom card_info pointer
867  * @reg: MC register offset
868  *
869  * Provides an MC register accessor for the atom interpreter (r4xx+).
870  * Returns the value of the MC register.
871  */
872 static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
873 {
874 	struct radeon_device *rdev = info->dev->dev_private;
875 	uint32_t r;
876 
877 	r = rdev->mc_rreg(rdev, reg);
878 	return r;
879 }
880 
/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides a MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}
896 
/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset (in dwords; converted to a byte offset below)
 * @val: value to write to the MMIO register
 *
 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}
912 
913 /**
914  * cail_reg_read - read MMIO register
915  *
916  * @info: atom card_info pointer
917  * @reg: MMIO register offset
918  *
919  * Provides an MMIO register accessor for the atom interpreter (r4xx+).
920  * Returns the value of the MMIO register.
921  */
922 static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
923 {
924 	struct radeon_device *rdev = info->dev->dev_private;
925 	uint32_t r;
926 
927 	r = RREG32(reg*4);
928 	return r;
929 }
930 
/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset (in dwords; converted to a byte offset below)
 * @val: value to write to the IO register
 *
 * Provides a IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}
946 
947 /**
948  * cail_ioreg_read - read IO register
949  *
950  * @info: atom card_info pointer
951  * @reg: IO register offset
952  *
953  * Provides an IO register accessor for the atom interpreter (r4xx+).
954  * Returns the value of the IO register.
955  */
956 static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
957 {
958 	struct radeon_device *rdev = info->dev->dev_private;
959 	uint32_t r;
960 
961 	r = RREG32_IO(reg*4);
962 	return r;
963 }
964 
/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc_obj(struct card_info);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev_to_drm(rdev);
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		/* no PCI I/O BAR: fall back to MMIO accessors for IIO */
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		/* atom_parse failed: release the card_info we allocated above */
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev_to_drm(rdev));
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}
1013 
1014 /**
1015  * radeon_atombios_fini - free the driver info and callbacks for atombios
1016  *
1017  * @rdev: radeon_device pointer
1018  *
1019  * Frees the driver info and register access callbacks for the ATOM
1020  * interpreter (r4xx+).
1021  * Called at driver shutdown.
1022  */
1023 void radeon_atombios_fini(struct radeon_device *rdev)
1024 {
1025 	if (rdev->mode_info.atom_context) {
1026 		kfree(rdev->mode_info.atom_context->scratch);
1027 		kfree(rdev->mode_info.atom_context->iio);
1028 	}
1029 	kfree(rdev->mode_info.atom_context);
1030 	rdev->mode_info.atom_context = NULL;
1031 	kfree(rdev->mode_info.atom_card_info);
1032 	rdev->mode_info.atom_card_info = NULL;
1033 }
1034 
1035 /* COMBIOS */
1036 /*
1037  * COMBIOS is the bios format prior to ATOM. It provides
1038  * command tables similar to ATOM, but doesn't have a unified
1039  * parser.  See radeon_combios.c
1040  */
1041 
/**
 * radeon_combios_init - init the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info for combios (r1xx-r3xx).
 * Always returns 0 (success).
 * Called at driver startup.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev_to_drm(rdev));
	return 0;
}
1056 
/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx).
 * Called at driver shutdown.  Intentionally a no-op: combios keeps no
 * dynamically allocated driver state (see radeon_combios_init()).
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
}
1068 
1069 /* if we get transitioned to only one device, take VGA back */
1070 /**
1071  * radeon_vga_set_decode - enable/disable vga decode
1072  *
1073  * @pdev: PCI device
1074  * @state: enable/disable vga decode
1075  *
1076  * Enable/disable vga decode (all asics).
1077  * Returns VGA resource flags.
1078  */
1079 static unsigned int radeon_vga_set_decode(struct pci_dev *pdev, bool state)
1080 {
1081 	struct drm_device *dev = pci_get_drvdata(pdev);
1082 	struct radeon_device *rdev = dev->dev_private;
1083 	radeon_vga_set_state(rdev, state);
1084 	if (state)
1085 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1086 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1087 	else
1088 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1089 }
1090 
1091 /**
1092  * radeon_gart_size_auto - Determine a sensible default GART size
1093  *                         according to ASIC family.
1094  *
1095  * @family: ASIC family name
1096  */
1097 static int radeon_gart_size_auto(enum radeon_family family)
1098 {
1099 	/* default to a larger gart size on newer asics */
1100 	if (family >= CHIP_TAHITI)
1101 		return 2048;
1102 	else if (family >= CHIP_RV770)
1103 		return 1024;
1104 	else
1105 		return 512;
1106 }
1107 
/**
 * radeon_check_arguments - validate module params
 *
 * @rdev: radeon_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 * Invalid values are warned about and replaced with safe defaults.
 */
static void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	if (radeon_vram_limit != 0 && !is_power_of_2(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		radeon_vram_limit = 0;
	}

	/* -1 means pick a per-family default */
	if (radeon_gart_size == -1) {
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	/* gtt size must be power of two and greater or equal to 32M */
	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small\n",
				radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	} else if (!is_power_of_2(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
				radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	/* radeon_gart_size is in MB */
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	/* AGP mode can only be -1, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}

	/* radeon_vm_size is in GB; bogus values fall back to 4GB */
	if (!is_power_of_2(radeon_vm_size)) {
		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	if (radeon_vm_size < 1) {
		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/*
	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
	 */
	if (radeon_vm_size > 1024) {
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (radeon_vm_block_size == -1) {

		/* Total bits covered by PD + PTs */
		unsigned bits = ilog2(radeon_vm_size) + 18;

		/* Make sure the PD is 4K in size up to 8GB address space.
		   Above that split equal between PD and PTs */
		if (radeon_vm_size <= 8)
			radeon_vm_block_size = bits - 9;
		else
			radeon_vm_block_size = (bits + 3) / 2;

	} else if (radeon_vm_block_size < 9) {
		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}

	if (radeon_vm_block_size > 24 ||
	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}
}
1205 
/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver.  Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/* PX systems: ignore switcheroo OFF requests */
	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		radeon_resume_kms(dev, true, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("radeon: switched off\n");
		/* stop output polling before the hw goes away */
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, true, true, false);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
1239 
1240 /**
1241  * radeon_switcheroo_can_switch - see if switcheroo state can change
1242  *
1243  * @pdev: pci dev pointer
1244  *
1245  * Callback for the switcheroo driver.  Check of the switcheroo
1246  * state can be changed.
1247  * Returns true if the state can be changed, false if not.
1248  */
1249 static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
1250 {
1251 	struct drm_device *dev = pci_get_drvdata(pdev);
1252 
1253 	/*
1254 	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1255 	 * locking inversion with the driver load path. And the access here is
1256 	 * completely racy anyway. So don't bother with locking for now.
1257 	 */
1258 	return atomic_read(&dev->open_count) == 0;
1259 }
1260 
/* vga_switcheroo callbacks; no reprobe hook is needed for radeon */
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
1266 
/**
 * radeon_device_init - initialize the driver
 *
 * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;
	bool runtime = false;

	rdev->shutdown = false;
	rdev->flags = flags;
	/* the low bits of the driver flags carry the ASIC family */
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = 512 * 1024 * 1024;
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}
	/* reserve one fence context per ring */
	rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	mutex_init(&rdev->srbm_mutex);
	mutex_init(&rdev->audio.component_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	r = radeon_gem_init(rdev);
	if (r)
		return r;

	/* sanitize the module parameters before they are used below */
	radeon_check_arguments(rdev);
	/* Adjust VM size here.
	 * Max GPUVM size for cayman+ is 40 bits.
	 */
	rdev->vm_manager.max_pfn = radeon_vm_size << 18;

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* set DMA mask.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	dma_bits = 40;
	if (rdev->flags & RADEON_IS_AGP)
		dma_bits = 32;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		dma_bits = 32;
#ifdef CONFIG_PPC64
	/* NOTE(review): Cedar is restricted to 32-bit DMA on PPC64 here;
	 * presumably a platform addressing quirk -- confirm. */
	if (rdev->family == CHIP_CEDAR)
		dma_bits = 32;
#endif

	r = dma_set_mask_and_coherent(&rdev->pdev->dev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pr_warn("radeon: No suitable DMA available\n");
		return r;
	}
	/* mirror the DMA limit for MSI message addresses */
	rdev->pdev->msi_addr_mask = DMA_BIT_MASK(dma_bits);
	rdev->need_swiotlb = drm_need_swiotlb(dma_bits);

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&rdev->mmio_idx_lock);
	spin_lock_init(&rdev->smc_idx_lock);
	spin_lock_init(&rdev->pll_idx_lock);
	spin_lock_init(&rdev->mc_idx_lock);
	spin_lock_init(&rdev->pcie_idx_lock);
	spin_lock_init(&rdev->pciep_idx_lock);
	spin_lock_init(&rdev->pif_idx_lock);
	spin_lock_init(&rdev->cg_idx_lock);
	spin_lock_init(&rdev->uvd_idx_lock);
	spin_lock_init(&rdev->rcu_idx_lock);
	spin_lock_init(&rdev->didt_idx_lock);
	spin_lock_init(&rdev->end_idx_lock);
	/* Bonaire and newer expose the MMIO registers in BAR 5, older in BAR 2 */
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL)
		return -ENOMEM;

	/* doorbell bar mapping */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	/* io port mapping: use the first I/O BAR found */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	if (rdev->flags & RADEON_IS_PX)
		radeon_device_handle_px_quirks(rdev);

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, radeon_vga_set_decode);

	if (rdev->flags & RADEON_IS_PX)
		runtime = true;
	if (!pci_is_thunderbolt_attached(rdev->pdev))
		vga_switcheroo_register_client(rdev->pdev,
					       &radeon_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);

	r = radeon_init(rdev);
	if (r)
		goto failed;

	radeon_gem_debugfs_init(rdev);

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			goto failed;
	}

	radeon_audio_component_init(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	/*
	 * Turks/Thames GPU will freeze whole laptop if DPM is not restarted
	 * after the CP ring have chew one packet at least. Hence here we stop
	 * and restart DPM after the radeon_ib_ring_tests().
	 */
	if (rdev->pm.dpm_enabled &&
	    (rdev->pm.pm_method == PM_METHOD_DPM) &&
	    (rdev->family == CHIP_TURKS) &&
	    (rdev->flags & RADEON_IS_MOBILITY)) {
		mutex_lock(&rdev->pm.mutex);
		radeon_dpm_disable(rdev);
		radeon_dpm_enable(rdev);
		mutex_unlock(&rdev->pm.mutex);
	}

	/* radeon_testing bit 0 requests move tests, bit 1 sync tests */
	if ((radeon_testing & 1)) {
		if (rdev->accel_working)
			radeon_test_moves(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
	}
	if ((radeon_testing & 2)) {
		if (rdev->accel_working)
			radeon_test_syncing(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
	}
	if (radeon_benchmarking) {
		if (rdev->accel_working)
			radeon_benchmark(rdev, radeon_benchmarking);
		else
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
	}
	return 0;

failed:
	/* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */
	if (radeon_is_px(ddev))
		pm_runtime_put_noidle(ddev->dev);
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	return r;
}
1504 
/**
 * radeon_device_fini - tear down the driver
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.  Undoes radeon_device_init() in
 * roughly reverse order.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_audio_component_fini(rdev);
	radeon_fini(rdev);
	/* unregister from switcheroo/vgaarb before tearing down mappings */
	if (!pci_is_thunderbolt_attached(rdev->pdev))
		vga_switcheroo_unregister_client(rdev->pdev);
	if (rdev->flags & RADEON_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	vga_client_unregister(rdev->pdev);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
}
1534 
1535 
1536 /*
1537  * Suspend & resume.
1538  */
/**
 * radeon_suspend_kms - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: true to disable the pci device and put it into D3hot
 * @notify_clients: true to notify in-kernel DRM clients of the suspend
 * @freeze: true to reset the asic rather than power down the pci
 *          device (only takes effect on CEDAR+ non-IGP parts)
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int radeon_suspend_kms(struct drm_device *dev, bool suspend,
		       bool notify_clients, bool freeze)
{
	struct radeon_device *rdev;
	struct pci_dev *pdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	rdev = dev->dev_private;
	pdev = to_pci_dev(dev->dev);

	/* nothing to do if switcheroo already powered the hw down */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	drm_modeset_lock_all(dev);
	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}
	drm_modeset_unlock_all(dev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
		struct drm_framebuffer *fb = crtc->primary->fb;

		if (radeon_crtc->cursor_bo) {
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}

		if (fb == NULL || fb->obj[0] == NULL) {
			continue;
		}
		/* don't unpin kernel fb objects */
		if (!drm_fb_helper_gem_is_fb(dev->fb_helper, fb->obj[0])) {
			struct radeon_bo *robj = gem_to_radeon_bo(fb->obj[0]);

			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			radeon_fence_driver_force_completion(rdev, i);
		} else {
			/* finish executing delayed work */
			flush_delayed_work(&rdev->fence_drv[i].lockup_work);
		}
	}

	radeon_save_bios_scratch_regs(rdev);

	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory
	 * This second call to evict vram is to evict the gart page table
	 * using the CPU.
	 */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(pdev);
	if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
		rdev->asic->asic_reset(rdev, true);
		pci_restore_state(pdev);
	} else if (suspend) {
		/* Shut down the device */
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	if (notify_clients)
		drm_client_dev_suspend(dev);

	return 0;
}
1644 
/**
 * radeon_resume_kms - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: true to re-enable the pci device and bring it back to D0
 * @notify_clients: true to restore modes, re-enable displays and
 *                  notify in-kernel DRM clients of the resume
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int radeon_resume_kms(struct drm_device *dev, bool resume, bool notify_clients)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct drm_crtc *crtc;
	int r;

	/* nothing to do if switcheroo has the hw powered down */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (resume) {
		pci_set_power_state(pdev, PCI_D0);
		pci_restore_state(pdev);
		if (pci_enable_device(pdev))
			return -1;
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

		if (radeon_crtc->cursor_bo) {
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				/* Only 27 bit offset for legacy cursor */
				r = radeon_bo_pin_restricted(robj,
							     RADEON_GEM_DOMAIN_VRAM,
							     ASIC_IS_AVIVO(rdev) ?
							     0 : 1 << 27,
							     &radeon_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				radeon_bo_unreserve(robj);
			}
		}
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	if (notify_clients) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
		drm_modeset_unlock_all(dev);
	}

	drm_kms_helper_poll_enable(dev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (notify_clients)
		drm_client_dev_resume(dev);

	return 0;
}
1748 
/**
 * radeon_gpu_reset - reset the asic
 *
 * @rdev: radeon device pointer
 *
 * Attempt the reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int radeon_gpu_reset(struct radeon_device *rdev)
{
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;

	/* block all new command submission while we reset */
	down_write(&rdev->exclusive_lock);

	if (!rdev->needs_reset) {
		up_write(&rdev->exclusive_lock);
		return 0;
	}

	atomic_inc(&rdev->gpu_reset_counter);

	radeon_save_bios_scratch_regs(rdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);

	/* back up the ring contents so pending work can be replayed below */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* replay the saved commands, or force-complete fences on failure */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!r && ring_data[i]) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
		} else {
			radeon_fence_driver_force_completion(rdev, i);
			kfree(ring_data[i]);
		}
	}

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);

	rdev->in_reset = true;
	rdev->needs_reset = false;

	/* let readers back in while we finish bringing the GPU up */
	downgrade_write(&rdev->exclusive_lock);

	drm_helper_resume_force_mode(rdev_to_drm(rdev));

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (!r) {
		r = radeon_ib_ring_tests(rdev);
		if (r && saved)
			r = -EAGAIN;
	} else {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	/* -EAGAIN: ring tests failed after a restore, so request another try */
	rdev->needs_reset = r == -EAGAIN;
	rdev->in_reset = false;

	up_read(&rdev->exclusive_lock);
	return r;
}
1860