xref: /linux/drivers/gpu/drm/i915/gt/uc/intel_guc.c (revision 17cfcb68af3bc7d5e8ae08779b1853310a2949f3)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include "gt/intel_gt.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"

/**
 * DOC: GuC
 *
 * The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC is
 * designed to offload some of the functionality usually performed by the host
 * driver; currently the main operations it can take care of are:
 *
 * - Authentication of the HuC, which is required to fully enable HuC usage.
 * - Low latency graphics context scheduling (a.k.a. GuC submission).
 * - GT Power management.
 *
 * The enable_guc module parameter can be used to select which of those
 * operations to enable within GuC. Note that not all the operations are
 * supported on all gen9+ platforms.
 *
 * Enabling the GuC is not mandatory and therefore the firmware is only loaded
 * if at least one of the operations is selected. However, not loading the GuC
 * might result in the loss of some features that do require the GuC (currently
 * just the HuC, but more are expected to land in the future).
 */
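/*
 * For example (per the i915.enable_guc modparam help at this point in time:
 * bit 0 selects GuC submission, bit 1 selects HuC loading, -1 leaves the
 * choice to the driver), HuC loading alone can be requested with:
 *
 *	modprobe i915 enable_guc=2
 */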

static void gen8_guc_raise_irq(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	intel_uncore_write(gt->uncore, GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}

static void gen11_guc_raise_irq(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	intel_uncore_write(gt->uncore, GEN11_GUC_HOST_INTERRUPT, 0);
}

static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}
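
/*
 * E.g. with the Gen11 register block set up in intel_guc_init_send_regs()
 * below, guc_send_reg(guc, 2) resolves to the MMIO register 8 bytes above
 * GEN11_SOFT_SCRATCH(0), i.e. the third 32-bit scratch register of the H2G
 * message area.
 */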

void intel_guc_init_send_regs(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	if (INTEL_GEN(gt->i915) >= 11) {
		guc->send_regs.base =
				i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
		guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
	} else {
		guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
		guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
		BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
	}

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
					guc_send_reg(guc, i),
					FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}

void intel_guc_init_early(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	intel_guc_fw_init_early(guc);
	intel_guc_ct_init_early(&guc->ct);
	intel_guc_log_init_early(&guc->log);
	intel_guc_submission_init_early(guc);

	mutex_init(&guc->send_mutex);
	spin_lock_init(&guc->irq_lock);
	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;
	if (INTEL_GEN(i915) >= 11) {
		guc->notify = gen11_guc_raise_irq;
		guc->interrupts.reset = gen11_reset_guc_interrupts;
		guc->interrupts.enable = gen11_enable_guc_interrupts;
		guc->interrupts.disable = gen11_disable_guc_interrupts;
	} else {
		guc->notify = gen8_guc_raise_irq;
		guc->interrupts.reset = gen9_reset_guc_interrupts;
		guc->interrupts.enable = gen9_enable_guc_interrupts;
		guc->interrupts.disable = gen9_disable_guc_interrupts;
	}
}
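
/*
 * These vfuncs are reached through thin wrappers so callers stay gen-agnostic;
 * a minimal sketch of the dispatch, assuming the inline helper declared in
 * intel_guc.h (used later in this file by intel_guc_send_mmio()):
 *
 *	intel_guc_notify(guc);	// calls guc->notify(), i.e.
 *				// gen11_guc_raise_irq() on Gen11+
 */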

static int guc_shared_data_create(struct intel_guc *guc)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		return PTR_ERR(vaddr);
	}

	guc->shared_data = vma;
	guc->shared_data_vaddr = vaddr;

	return 0;
}

static void guc_shared_data_destroy(struct intel_guc *guc)
{
	i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP);
}

static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
	u32 level = intel_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}
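
/*
 * Worked example, assuming the usual mapping in which
 * GUC_LOG_LEVEL_TO_VERBOSITY(x) evaluates to (x - 2): guc_log_level=3 counts
 * as verbose and is programmed as GuC verbosity 1, while any non-verbose
 * level (including "enabled, errors only") sets GUC_LOG_DISABLED here.
 */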

static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (!intel_guc_is_submission_supported(guc))
		flags |= GUC_CTL_DISABLE_SCHEDULER;

	return flags;
}

static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (intel_guc_is_submission_supported(guc)) {
		u32 ctxnum, base;

		base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
		ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;

		base >>= PAGE_SHIFT;
		flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
			(ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
	}
	return flags;
}

static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
	u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
	u32 flags;

	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define UNIT SZ_1M
	#define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
	#else
	#define UNIT SZ_4K
	#define FLAG 0
	#endif

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!DPC_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!ISR_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
	BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		FLAG |
		((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
		((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	#undef UNIT
	#undef FLAG

	return flags;
}
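
/*
 * Worked example of the size encoding above, with hypothetical buffer sizes:
 * if CRASH_BUFFER_SIZE were SZ_8K it would not be 1M-aligned, so UNIT would
 * be SZ_4K and FLAG would be 0, and the crash field would encode
 * 8K / 4K - 1 = 1; a hypothetical SZ_32K DPC buffer would then encode
 * 32K / 4K - 1 = 7 in its field.
 */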

static u32 guc_ctl_ads_flags(struct intel_guc *guc)
{
	u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
	u32 flags = ads << GUC_ADS_ADDR_SHIFT;

	return flags;
}

/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_init_params(struct intel_guc *guc)
{
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));

	params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
}

/*
 * Write the GuC parameter block to the SOFT_SCRATCH registers before
 * starting the firmware transfer. The firmware reads these parameters
 * on startup and they cannot be changed thereafter.
 */
void intel_guc_write_params(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int i;

	/*
	 * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
	 * they are power context saved so it's ok to release forcewake
	 * when we are done here and take it again at xfer time.
	 */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER);

	intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);

	intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER);
}
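
/*
 * The resulting scratch register layout, as consumed by the firmware:
 *
 *	SOFT_SCRATCH(0)     = 0
 *	SOFT_SCRATCH(1 + i) = guc->params[i], for i in [0, GUC_CTL_MAX_DWORDS)
 */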

int intel_guc_init(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	int ret;

	ret = intel_uc_fw_init(&guc->fw);
	if (ret)
		goto err_fetch;

	ret = guc_shared_data_create(guc);
	if (ret)
		goto err_fw;
	GEM_BUG_ON(!guc->shared_data);

	ret = intel_guc_log_create(&guc->log);
	if (ret)
		goto err_shared;

	ret = intel_guc_ads_create(guc);
	if (ret)
		goto err_log;
	GEM_BUG_ON(!guc->ads_vma);

	ret = intel_guc_ct_init(&guc->ct);
	if (ret)
		goto err_ads;

	if (intel_guc_is_submission_supported(guc)) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = intel_guc_submission_init(guc);
		if (ret)
			goto err_ct;
	}

	/* now that everything is perma-pinned, initialize the parameters */
	guc_init_params(guc);

	/* We need to notify the GuC whenever we change the GGTT */
	i915_ggtt_enable_guc(gt->ggtt);

	return 0;

err_ct:
	intel_guc_ct_fini(&guc->ct);
err_ads:
	intel_guc_ads_destroy(guc);
err_log:
	intel_guc_log_destroy(&guc->log);
err_shared:
	guc_shared_data_destroy(guc);
err_fw:
	intel_uc_fw_fini(&guc->fw);
err_fetch:
	intel_uc_fw_cleanup_fetch(&guc->fw);
	DRM_DEV_DEBUG_DRIVER(gt->i915->drm.dev, "failed with %d\n", ret);
	return ret;
}

void intel_guc_fini(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	if (!intel_uc_fw_is_available(&guc->fw))
		return;

	i915_ggtt_disable_guc(gt->ggtt);

	if (intel_guc_is_submission_supported(guc))
		intel_guc_submission_fini(guc);

	intel_guc_ct_fini(&guc->ct);

	intel_guc_ads_destroy(guc);
	intel_guc_log_destroy(&guc->log);
	guc_shared_data_destroy(guc);
	intel_uc_fw_fini(&guc->fw);
	intel_uc_fw_cleanup_fetch(&guc->fw);
}

int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
		       u32 *response_buf, u32 response_buf_size)
{
	WARN(1, "Unexpected send: action=%#x\n", *action);
	return -ENODEV;
}

void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
{
	WARN(1, "Unexpected event: no suitable handler\n");
}

/*
 * This function implements the MMIO based host to GuC interface.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	u32 status;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	/* We expect only action code */
	GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);

	/* If CT is available, we expect to use MMIO only during init/fini */
	GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
		   *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);

	for (i = 0; i < len; i++)
		intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]);

	intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(uncore,
					   guc_send_reg(guc, 0),
					   INTEL_GUC_MSG_TYPE_MASK,
					   INTEL_GUC_MSG_TYPE_RESPONSE <<
					   INTEL_GUC_MSG_TYPE_SHIFT,
					   10, 10, &status);
	/* If GuC explicitly returned an error, convert it to -EIO */
	if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
		ret = -EIO;

	if (ret) {
		DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
			  action[0], ret, status);
		goto out;
	}

	if (response_buf) {
		int count = min(response_buf_size, guc->send_regs.count - 1);

		for (i = 0; i < count; i++)
			response_buf[i] = intel_uncore_read(uncore,
							    guc_send_reg(guc, i + 1));
	}

	/* Use data from the GuC response as our return value */
	ret = INTEL_GUC_MSG_TO_DATA(status);

out:
	intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}
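
/*
 * Usage sketch: this is how the CT code is expected to register a buffer
 * descriptor over MMIO before the CT channel exists. The payload here is
 * illustrative, not authoritative (desc_ggtt_addr and type stand in for
 * values owned by the CT code):
 *
 *	u32 action[] = {
 *		INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
 *		desc_ggtt_addr,	// GGTT offset of the CTB descriptor
 *		type,		// which buffer (e.g. send vs receive)
 *	};
 *	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
 */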

int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len)
{
	u32 msg;

	if (unlikely(!len))
		return -EPROTO;

	/* Make sure to handle only enabled messages */
	msg = payload[0] & guc->msg_enabled_mask;

	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
		intel_guc_log_handle_flush_event(&guc->log);

	return 0;
}

int intel_guc_sample_forcewake(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	u32 action[2];

	action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
	/* WaRsDisableCoarsePowerGating:skl,cnl */
	if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		action[1] = 0;
	else
		/* bits 0 and 1 select the Render and Media domains, respectively */
		action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: RSA offset w.r.t. the GGTT base of the HuC vma
 *
 * Triggers a HuC firmware authentication request to the GuC via the
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC action, sent with intel_guc_send(). This
 * function is invoked by intel_huc_auth().
 *
 * Return:	non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc:	the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int ret;
	u32 status;
	u32 action[] = {
		INTEL_GUC_ACTION_ENTER_S_STATE,
		GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
	};

	/*
	 * The ENTER_S_STATE action queues the save/restore operation in GuC FW
	 * and then returns, so waiting on the H2G is not enough to guarantee
	 * GuC is done. When all the processing is done, GuC writes
	 * INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll
	 * on that. Note that GuC does not ensure that the value in the register
	 * is different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is
	 * in progress so we need to take care of that ourselves as well.
	 */

	intel_uncore_write(uncore, SOFT_SCRATCH(14),
			   INTEL_GUC_SLEEP_STATE_INVALID_MASK);

	ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
	if (ret)
		return ret;

	ret = __intel_wait_for_register(uncore, SOFT_SCRATCH(14),
					INTEL_GUC_SLEEP_STATE_INVALID_MASK,
					0, 0, 10, &status);
	if (ret)
		return ret;

	if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
		DRM_ERROR("GuC failed to change sleep state. "
			  "action=0x%x, err=%u\n",
			  action[0], status);
		return -EIO;
	}

	return 0;
}

/**
 * intel_guc_reset_engine() - ask GuC to reset an engine
 * @guc:	intel_guc structure
 * @engine:	engine to be reset
 */
int intel_guc_reset_engine(struct intel_guc *guc,
			   struct intel_engine_cs *engine)
{
	u32 data[7];

	GEM_BUG_ON(!guc->execbuf_client);

	data[0] = INTEL_GUC_ACTION_REQUEST_ENGINE_RESET;
	data[1] = engine->guc_id;
	data[2] = 0;
	data[3] = 0;
	data[4] = 0;
	data[5] = guc->execbuf_client->stage_id;
	data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc:	the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_EXIT_S_STATE,
		GUC_POWER_D0,
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * DOC: GuC Memory Management
 *
 * GuC can't allocate any memory for its own usage, so all the allocations must
 * be handled by the host driver. GuC accesses the memory via the GGTT, with the
 * exception of the top and bottom parts of the 4GB address space, which are
 * instead re-mapped by the GuC HW to the memory location of the FW itself
 * (WOPCM) or other parts of the HW. The driver must take care not to place
 * objects that the GuC is going to access in these reserved ranges. The layout
 * of the GuC address space is shown below:
 *
 * ::
 *
 *     +===========> +====================+ <== FFFF_FFFF
 *     ^             |      Reserved      |
 *     |             +====================+ <== GUC_GGTT_TOP
 *     |             |                    |
 *     |             |        DRAM        |
 *    GuC            |                    |
 *  Address    +===> +====================+ <== GuC ggtt_pin_bias
 *   Space     ^     |                    |
 *     |       |     |                    |
 *     |      GuC    |        GuC         |
 *     |     WOPCM   |       WOPCM        |
 *     |      Size   |                    |
 *     |       |     |                    |
 *     v       v     |                    |
 *     +=======+===> +====================+ <== 0000_0000
 *
 * The lower part of the GuC address space [0, ggtt_pin_bias) is mapped to the
 * GuC WOPCM, while the upper part [ggtt_pin_bias, GUC_GGTT_TOP) is mapped to
 * DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
 */
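
/*
 * E.g. with a hypothetical 2MB WOPCM, ggtt_pin_bias is 2MB and any object the
 * GuC must access has to be pinned inside [2MB, GUC_GGTT_TOP); see
 * intel_guc_allocate_vma() below for how the bias is applied.
 */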

/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its whole lifetime,
 * so we allocate both some backing storage and a range inside the Global GTT.
 * We must pin it in the GGTT somewhere other than [0, GUC ggtt_pin_bias)
 * because that range is reserved inside GuC.
 *
 * Return:	A i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u64 flags;
	int ret;

	obj = i915_gem_object_create_shmem(gt->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		goto err;

	flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
	ret = i915_vma_pin(vma, 0, 0, flags);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return i915_vma_make_unshrinkable(vma);

err:
	i915_gem_object_put(obj);
	return vma;
}
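
/*
 * Usage sketch, mirroring guc_shared_data_create() above (the
 * I915_VMA_RELEASE_MAP flag on release drops the mapping taken with
 * i915_gem_object_pin_map()):
 *
 *	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
 *	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
 *	...
 *	i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
 */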
647