/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "amdgpu_dm_hdcp.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "dc_fused_io.h"
#include "dm_helpers.h"
#include <drm/display/drm_hdcp_helper.h>
#include "hdcp_psp.h"

/*
 * If the SRM version being loaded is less than or equal to the
 * currently loaded SRM, psp will return 0xFFFF as the version
 */
#define PSP_SRM_VERSION_MAX 0xFFFF

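/*
 * Low-level DDC accessors handed to the HDCP module through
 * mod_hdcp_config.ddc.funcs (see hdcp_create_workqueue() below).
 * The opaque handle is the dc_link the HDCP session runs on.
 */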
static bool
lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
{
	struct dc_link *link = handle;
	struct i2c_payload i2c_payloads[] = {{true, address, size, (void *)data} };
	struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW,
				  link->dc->caps.i2c_speed_in_khz};

	return dm_helpers_submit_i2c(link->ctx, link, &cmd);
}

static bool
lp_read_i2c(void *handle, uint32_t address, uint8_t offset, uint8_t *data, uint32_t size)
{
	struct dc_link *link = handle;

	struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset},
					     {false, address, size, data} };
	struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW,
				  link->dc->caps.i2c_speed_in_khz};

	return dm_helpers_submit_i2c(link->ctx, link, &cmd);
}

static bool
lp_write_dpcd(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
{
	struct dc_link *link = handle;

	return dm_helpers_dp_write_dpcd(link->ctx, link, address, data, size);
}

static bool
lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
{
	struct dc_link *link = handle;

	return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size);
}

static bool lp_atomic_write_poll_read_i2c(
	void *handle,
	const struct mod_hdcp_atomic_op_i2c *write,
	const struct mod_hdcp_atomic_op_i2c *poll,
	struct mod_hdcp_atomic_op_i2c *read,
	uint32_t poll_timeout_us,
	uint8_t poll_mask_msb
)
{
	struct dc_link *link = handle;

	return dm_atomic_write_poll_read_i2c(link, write, poll, read, poll_timeout_us, poll_mask_msb);
}

static bool lp_atomic_write_poll_read_aux(
	void *handle,
	const struct mod_hdcp_atomic_op_aux *write,
	const struct mod_hdcp_atomic_op_aux *poll,
	struct mod_hdcp_atomic_op_aux *read,
	uint32_t poll_timeout_us,
	uint8_t poll_mask_msb
)
{
	struct dc_link *link = handle;

	return dm_atomic_write_poll_read_aux(link, write, poll, read, poll_timeout_us, poll_mask_msb);
}

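/*
 * Read back the SRM (plus its version and size) currently held by the HDCP TA,
 * using the PSP shared memory buffer as the transport.
 */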
static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size)
{
	struct ta_hdcp_shared_memory *hdcp_cmd;

	if (!psp->hdcp_context.context.initialized) {
		DRM_WARN("Failed to get hdcp srm. HDCP TA is not initialized.");
		return NULL;
	}

	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));

	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_GET_SRM;
	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);

	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
		return NULL;

	*srm_version = hdcp_cmd->out_msg.hdcp_get_srm.srm_version;
	*srm_size = hdcp_cmd->out_msg.hdcp_get_srm.srm_buf_size;

	return hdcp_cmd->out_msg.hdcp_get_srm.srm_buf;
}

static int psp_set_srm(struct psp_context *psp,
		       u8 *srm, uint32_t srm_size, uint32_t *srm_version)
{
	struct ta_hdcp_shared_memory *hdcp_cmd;

	if (!psp->hdcp_context.context.initialized) {
		DRM_WARN("Failed to set hdcp srm. HDCP TA is not initialized.");
		return -EINVAL;
	}

	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));

	memcpy(hdcp_cmd->in_msg.hdcp_set_srm.srm_buf, srm, srm_size);
	hdcp_cmd->in_msg.hdcp_set_srm.srm_buf_size = srm_size;
	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_SET_SRM;

	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);

	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS ||
	    hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 ||
	    hdcp_cmd->out_msg.hdcp_set_srm.srm_version == PSP_SRM_VERSION_MAX)
		return -EINVAL;

	*srm_version = hdcp_cmd->out_msg.hdcp_set_srm.srm_version;
	return 0;
}

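/*
 * Apply the scheduling requests that mod_hdcp returned in hdcp_work->output:
 * (re)arm or cancel the callback and watchdog delayed works, then kick off a
 * property re-validation pass.
 */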
static void process_output(struct hdcp_workqueue *hdcp_work)
{
	struct mod_hdcp_output output = hdcp_work->output;

	if (output.callback_stop)
		cancel_delayed_work(&hdcp_work->callback_dwork);

	if (output.callback_needed)
		schedule_delayed_work(&hdcp_work->callback_dwork,
				      msecs_to_jiffies(output.callback_delay));

	if (output.watchdog_timer_stop)
		cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);

	if (output.watchdog_timer_needed)
		schedule_delayed_work(&hdcp_work->watchdog_timer_dwork,
				      msecs_to_jiffies(output.watchdog_timer_delay));

	schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(0));
}

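/* Take or release the mutex of every per-link workqueue entry (used by the SRM sysfs path). */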
static void link_lock(struct hdcp_workqueue *work, bool lock)
{
	int i = 0;

	for (i = 0; i < work->max_link; i++) {
		if (lock)
			mutex_lock(&work[i].mutex);
		else
			mutex_unlock(&work[i].mutex);
	}
}

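/*
 * Enable or disable HDCP for one display on the given link. Takes a reference
 * on the connector, programs the link/display adjustments for the requested
 * DRM content type and hands the update to mod_hdcp.
 */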
void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
			 unsigned int link_index,
			 struct amdgpu_dm_connector *aconnector,
			 u8 content_type,
			 bool enable_encryption)
{
	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
	struct mod_hdcp_link_adjustment link_adjust;
	struct mod_hdcp_display_adjustment display_adjust;
	unsigned int conn_index = aconnector->base.index;

	guard(mutex)(&hdcp_w->mutex);
	drm_connector_get(&aconnector->base);
	if (hdcp_w->aconnector[conn_index])
		drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
	hdcp_w->aconnector[conn_index] = aconnector;

	memset(&link_adjust, 0, sizeof(link_adjust));
	memset(&display_adjust, 0, sizeof(display_adjust));

	if (enable_encryption) {
		/* Explicitly set the saved SRM, since the sysfs call will come after
		 * hdcp has already been enabled (s3 resume case)
		 */
		if (hdcp_work->srm_size > 0)
			psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm,
				    hdcp_work->srm_size,
				    &hdcp_work->srm_version);

		display_adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE;

		link_adjust.auth_delay = 2;

		if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) {
			link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
		} else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) {
			link_adjust.hdcp1.disable = 1;
			link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1;
		}

		schedule_delayed_work(&hdcp_w->property_validate_dwork,
				      msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
	} else {
		display_adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
		hdcp_w->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
		cancel_delayed_work(&hdcp_w->property_validate_dwork);
	}

	mod_hdcp_update_display(&hdcp_w->hdcp, conn_index, &link_adjust, &display_adjust, &hdcp_w->output);

	process_output(hdcp_w);
}

static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
				unsigned int link_index,
				struct amdgpu_dm_connector *aconnector)
{
	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
	struct drm_connector_state *conn_state = aconnector->base.state;
	unsigned int conn_index = aconnector->base.index;

	guard(mutex)(&hdcp_w->mutex);

	/* Removing the display triggers an auth reset -> hdcp destroy, so we
	 * expect the Content Protection (CP) property to go back to DESIRED if
	 * it is currently ENABLED. The CP property change must happen before
	 * the element is removed from the linked list.
	 */
	if (conn_state && conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

		DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP 2 -> 1, type %u, DPMS %u\n",
				 aconnector->base.index, conn_state->hdcp_content_type,
				 aconnector->base.dpms);
	}

	mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
	if (hdcp_w->aconnector[conn_index]) {
		drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
		hdcp_w->aconnector[conn_index] = NULL;
	}
	process_output(hdcp_w);
}

void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
{
	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
	unsigned int conn_index;

	guard(mutex)(&hdcp_w->mutex);

	mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output);

	cancel_delayed_work(&hdcp_w->property_validate_dwork);

	for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) {
		hdcp_w->encryption_status[conn_index] =
				MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
		if (hdcp_w->aconnector[conn_index]) {
			drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
			hdcp_w->aconnector[conn_index] = NULL;
		}
	}

	process_output(hdcp_w);
}

void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
{
	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];

	schedule_work(&hdcp_w->cpirq_work);
}

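/* Delayed work: forward MOD_HDCP_EVENT_CALLBACK to the HDCP module. */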
static void event_callback(struct work_struct *work)
{
	struct hdcp_workqueue *hdcp_work;

	hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue,
				 callback_dwork);

	guard(mutex)(&hdcp_work->mutex);

	cancel_delayed_work(&hdcp_work->callback_dwork);

	mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CALLBACK,
			       &hdcp_work->output);

	process_output(hdcp_work);
}

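/*
 * Push the driver's per-connector encryption status out to the DRM
 * "Content Protection" property, holding connection_mutex and waiting for any
 * pending commit so the property is not updated mid-modeset.
 */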
static void event_property_update(struct work_struct *work)
{
	struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue,
							property_update_work);
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_device *dev;
	long ret;
	unsigned int conn_index;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;

	for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) {
		aconnector = hdcp_work->aconnector[conn_index];

		if (!aconnector)
			continue;

		connector = &aconnector->base;

		/* check if display connected */
		if (connector->status != connector_status_connected)
			continue;

		conn_state = aconnector->base.state;

		if (!conn_state)
			continue;

		dev = connector->dev;

		if (!dev)
			continue;

		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
		guard(mutex)(&hdcp_work->mutex);

		if (conn_state->commit) {
			ret = wait_for_completion_interruptible_timeout(&conn_state->commit->hw_done,
									10 * HZ);
			if (ret == 0) {
				DRM_ERROR("HDCP state unknown! Setting it to DESIRED\n");
				hdcp_work->encryption_status[conn_index] =
					MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
			}
		}
		if (hdcp_work->encryption_status[conn_index] !=
		    MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) {
			if (conn_state->hdcp_content_type ==
			    DRM_MODE_HDCP_CONTENT_TYPE0 &&
			    hdcp_work->encryption_status[conn_index] <=
			    MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) {
				DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_ENABLED\n");
				drm_hdcp_update_content_protection(connector,
								   DRM_MODE_CONTENT_PROTECTION_ENABLED);
			} else if (conn_state->hdcp_content_type ==
				   DRM_MODE_HDCP_CONTENT_TYPE1 &&
				   hdcp_work->encryption_status[conn_index] ==
				   MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) {
				drm_hdcp_update_content_protection(connector,
								   DRM_MODE_CONTENT_PROTECTION_ENABLED);
			}
		} else {
			DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_DESIRED\n");
			drm_hdcp_update_content_protection(connector,
							   DRM_MODE_CONTENT_PROTECTION_DESIRED);
		}
		drm_modeset_unlock(&dev->mode_config.connection_mutex);
	}
}

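/*
 * Delayed work: query mod_hdcp for the current encryption status of every
 * tracked connector and, on any change, schedule property_update_work.
 */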
static void event_property_validate(struct work_struct *work)
{
	struct hdcp_workqueue *hdcp_work =
		container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork);
	struct mod_hdcp_display_query query;
	struct amdgpu_dm_connector *aconnector;
	unsigned int conn_index;

	guard(mutex)(&hdcp_work->mutex);

	for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX;
	     conn_index++) {
		aconnector = hdcp_work->aconnector[conn_index];

		if (!aconnector)
			continue;

		/* check if display connected */
		if (aconnector->base.status != connector_status_connected)
			continue;

		if (!aconnector->base.state)
			continue;

		query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
		mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index,
				       &query);

		DRM_DEBUG_DRIVER("[HDCP_DM] disp %d, connector->CP %u, (query, work): (%d, %d)\n",
				 aconnector->base.index,
				 aconnector->base.state->content_protection,
				 query.encryption_status,
				 hdcp_work->encryption_status[conn_index]);

		if (query.encryption_status !=
		    hdcp_work->encryption_status[conn_index]) {
			DRM_DEBUG_DRIVER("[HDCP_DM] encryption_status change from %x to %x\n",
					 hdcp_work->encryption_status[conn_index],
					 query.encryption_status);

			hdcp_work->encryption_status[conn_index] =
				query.encryption_status;

			DRM_DEBUG_DRIVER("[HDCP_DM] trigger property_update_work\n");

			schedule_work(&hdcp_work->property_update_work);
		}
	}
}

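/* Delayed work: report an authentication watchdog timeout to the HDCP module. */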
static void event_watchdog_timer(struct work_struct *work)
{
	struct hdcp_workqueue *hdcp_work;

	hdcp_work = container_of(to_delayed_work(work),
				 struct hdcp_workqueue,
				 watchdog_timer_dwork);

	guard(mutex)(&hdcp_work->mutex);

	cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);

	mod_hdcp_process_event(&hdcp_work->hdcp,
			       MOD_HDCP_EVENT_WATCHDOG_TIMEOUT,
			       &hdcp_work->output);

	process_output(hdcp_work);
}

static void event_cpirq(struct work_struct *work)
{
	struct hdcp_workqueue *hdcp_work;

	hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work);

	guard(mutex)(&hdcp_work->mutex);

	mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output);

	process_output(hdcp_work);
}

void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
{
	int i = 0;

	for (i = 0; i < hdcp_work->max_link; i++) {
		cancel_delayed_work_sync(&hdcp_work[i].callback_dwork);
		cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
		cancel_delayed_work_sync(&hdcp_work[i].property_validate_dwork);
	}

	sysfs_remove_bin_file(kobj, &hdcp_work[0].attr);
	kfree(hdcp_work->srm);
	kfree(hdcp_work->srm_temp);
	kfree(hdcp_work);
}

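/*
 * Enable ASSR (alternate scrambler seed reset) for the link's DIG back end
 * through the DTM TA. Returns false if the TA is not initialized or rejects
 * the request.
 */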
static bool enable_assr(void *handle, struct dc_link *link)
{
	struct hdcp_workqueue *hdcp_work = handle;
	struct mod_hdcp hdcp = hdcp_work->hdcp;
	struct psp_context *psp = hdcp.config.psp.handle;
	struct ta_dtm_shared_memory *dtm_cmd;

	if (!psp->dtm_context.context.initialized) {
		DRM_INFO("Failed to enable ASSR, DTM TA is not initialized.");
		return false;
	}

	dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;

	guard(mutex)(&psp->dtm_context.mutex);
	memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));

	dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE;
	dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index =
		link->link_enc_hw_inst;
	dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;

	psp_dtm_invoke(psp, dtm_cmd->cmd_id);

	if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
		DRM_INFO("Failed to enable ASSR");
		return false;
	}

	return true;
}

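/*
 * cp_psp stream-config callback from DC: translate the stream/link state into
 * a mod_hdcp_link + mod_hdcp_display pair and register the display with
 * mod_hdcp (or remove it when the stream is DPMS off).
 */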
static void update_config(void *handle, struct cp_psp_stream_config *config)
{
	struct hdcp_workqueue *hdcp_work = handle;
	struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx;
	int link_index = aconnector->dc_link->link_index;
	unsigned int conn_index = aconnector->base.index;
	struct mod_hdcp_display *display = &hdcp_work[link_index].display;
	struct mod_hdcp_link *link = &hdcp_work[link_index].link;
	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
	struct dc_sink *sink = NULL;
	bool link_is_hdcp14 = false;

	if (config->dpms_off) {
		hdcp_remove_display(hdcp_work, link_index, aconnector);
		return;
	}

	memset(display, 0, sizeof(*display));
	memset(link, 0, sizeof(*link));

	display->index = aconnector->base.index;
	display->state = MOD_HDCP_DISPLAY_ACTIVE;

	if (aconnector->dc_sink)
		sink = aconnector->dc_sink;
	else if (aconnector->dc_em_sink)
		sink = aconnector->dc_em_sink;

	if (sink)
		link->mode = mod_hdcp_signal_type_to_operation_mode(sink->sink_signal);

	display->controller = CONTROLLER_ID_D0 + config->otg_inst;
	display->dig_fe = config->dig_fe;
	link->dig_be = config->dig_be;
	link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1;
	display->stream_enc_idx = config->stream_enc_idx;
	link->link_enc_idx = config->link_enc_idx;
	link->dio_output_id = config->dio_output_idx;
	link->phy_idx = config->phy_idx;

	if (sink)
		link_is_hdcp14 = dc_link_is_hdcp14(aconnector->dc_link, sink->sink_signal);
	link->hdcp_supported_informational = link_is_hdcp14;
	link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
	link->dp.assr_enabled = config->assr_enabled;
	link->dp.mst_enabled = config->mst_enabled;
	link->dp.dp2_enabled = config->dp2_enabled;
	link->dp.usb4_enabled = config->usb4_enabled;
	display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
	link->adjust.auth_delay = 2;
	link->adjust.hdcp1.disable = 0;
	hdcp_w->encryption_status[display->index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;

	DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index,
			 (!!aconnector->base.state) ?
			 aconnector->base.state->content_protection : -1,
			 (!!aconnector->base.state) ?
			 aconnector->base.state->hdcp_content_type : -1);

	guard(mutex)(&hdcp_w->mutex);

	mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);
	drm_connector_get(&aconnector->base);
	if (hdcp_w->aconnector[conn_index])
		drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
	hdcp_w->aconnector[conn_index] = aconnector;
	process_output(hdcp_w);
}

/**
 * DOC: Add sysfs interface for set/get srm
 *
 * NOTE: From usermode's perspective you only need to call write *ONCE*; the kernel
 * will automatically call srm_data_write() once or twice depending on the size.
 *
 * call: "cat file > /sys/class/drm/card0/device/hdcp_srm" from usermode no matter what the size is
 *
 * The kernel can only send PAGE_SIZE at once and since MAX_SRM_FILE(5120) > PAGE_SIZE(4096),
 * srm_data_write can be called multiple times.
 *
 * The sysfs interface doesn't tell us the total size up front, so we send partial SRMs to psp
 * and only the last call carries the full SRM. PSP will fail on every call before the last.
 *
 * This means we don't know if the SRM is good until the last call, and because of this
 * limitation we cannot return errors early as that would stop the kernel from writing to sysfs.
 *
 * Example 1:
 * Good SRM size = 5096
 * first call to write 4096 -> PSP fails
 * second call to write 1000 -> PSP passes -> SRM is set
 *
 * Example 2:
 * Bad SRM size = 4096
 * first call to write 4096 -> PSP fails (this is the same as above, but we don't know if this
 * is the last call)
 *
 * Solution?:
 * 1: Parse the SRM? -> It is signed so we don't know the EOF
 * 2: We could add another sysfs file that passes the size before calling set -> the simpler
 * solution is below
 *
 * Easy Solution:
 * Always call get after set to verify whether set was successful.
 * +----------------------+
 * |    Why it works:     |
 * +----------------------+
 * PSP will only update its SRM if the one it holds is older than the one we are trying to load.
 * Always do set first, then get.
 * -if we try to "1. SET" an older version, PSP will reject it and we can "2. GET" the newer
 * version and save it
 *
 * -if we try to "1. SET" a newer version, PSP will accept it and we can "2. GET" the
 * same (newer) version back and save it
 *
 * -if we try to "1. SET" a newer version and PSP rejects it, that means the format is
 * incorrect/corrupted and we should correct our SRM by getting it from PSP
 */
static ssize_t srm_data_write(struct file *filp, struct kobject *kobj,
			      const struct bin_attribute *bin_attr, char *buffer,
			      loff_t pos, size_t count)
{
	struct hdcp_workqueue *work;
	u32 srm_version = 0;

	work = container_of(bin_attr, struct hdcp_workqueue, attr);
	link_lock(work, true);

	memcpy(work->srm_temp + pos, buffer, count);

	if (!psp_set_srm(work->hdcp.config.psp.handle, work->srm_temp, pos + count, &srm_version)) {
		DRM_DEBUG_DRIVER("HDCP SRM SET version 0x%X", srm_version);
		memcpy(work->srm, work->srm_temp, pos + count);
		work->srm_size = pos + count;
		work->srm_version = srm_version;
	}

	link_lock(work, false);

	return count;
}

static ssize_t srm_data_read(struct file *filp, struct kobject *kobj,
			     const struct bin_attribute *bin_attr, char *buffer,
			     loff_t pos, size_t count)
{
	struct hdcp_workqueue *work;
	u8 *srm = NULL;
	u32 srm_version;
	u32 srm_size;
	size_t ret = count;

	work = container_of(bin_attr, struct hdcp_workqueue, attr);

	link_lock(work, true);

	srm = psp_get_srm(work->hdcp.config.psp.handle, &srm_version, &srm_size);

	if (!srm) {
		ret = -EINVAL;
		goto ret;
	}

	/* Reading at or past the end of the SRM returns 0 (EOF). */
	if (pos >= srm_size) {
		ret = 0;
		goto ret;
	}

	if (srm_size - pos < count) {
		memcpy(buffer, srm + pos, srm_size - pos);
		ret = srm_size - pos;
		goto ret;
	}

	memcpy(buffer, srm + pos, count);

ret:
	link_lock(work, false);
	return ret;
}

/* From the hdcp spec (5. Renewability) the SRM needs to be stored in non-volatile memory.
 *
 * For example,
 * if Application "A" sets the SRM (ver 2) and we reboot/suspend, then later when Application "B"
 * needs to use HDCP, the version in PSP should still be SRM (ver 2). So the SRM should be
 * persistent across boot/reboot/suspend/resume/shutdown.
 *
 * Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP
 * we need to make the SRM persistent.
 *
 * -PSP owns the checking of the SRM but doesn't have the ability to store it in non-volatile memory.
 * -The kernel cannot write to the filesystem.
 * -So we need usermode to do this for us, which is why an interface for usermode is needed.
 *
 * Usermode can read/write to/from PSP using the sysfs interface.
 * For example:
 * to save SRM from PSP to storage: cat /sys/class/drm/card0/device/hdcp_srm > srmfile
 * to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm
 */
static const struct bin_attribute data_attr = {
	.attr = {.name = "hdcp_srm", .mode = 0664},
	.size = PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, /* Limit SRM size */
	.write_new = srm_data_write,
	.read_new = srm_data_read,
};

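/*
 * Allocate one hdcp_workqueue entry per link. The SRM buffers and the hdcp_srm
 * sysfs file live on entry 0 only; each entry gets its own mutex, work items
 * and mod_hdcp config (PSP handle plus the DDC callbacks defined above).
 */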
struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
					     struct cp_psp *cp_psp, struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	struct hdcp_workqueue *hdcp_work;
	int i = 0;

	hdcp_work = kcalloc(max_caps, sizeof(*hdcp_work), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(hdcp_work))
		return NULL;

	hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE,
				 sizeof(*hdcp_work->srm), GFP_KERNEL);

	if (!hdcp_work->srm)
		goto fail_alloc_context;

	hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE,
				      sizeof(*hdcp_work->srm_temp), GFP_KERNEL);

	if (!hdcp_work->srm_temp)
		goto fail_alloc_context;

	hdcp_work->max_link = max_caps;

	for (i = 0; i < max_caps; i++) {
		mutex_init(&hdcp_work[i].mutex);

		INIT_WORK(&hdcp_work[i].cpirq_work, event_cpirq);
		INIT_WORK(&hdcp_work[i].property_update_work, event_property_update);
		INIT_DELAYED_WORK(&hdcp_work[i].callback_dwork, event_callback);
		INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer);
		INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate);

		struct mod_hdcp_config *config = &hdcp_work[i].hdcp.config;
		struct mod_hdcp_ddc_funcs *ddc_funcs = &config->ddc.funcs;

		config->psp.handle = &adev->psp;
		if (dc->ctx->dce_version == DCN_VERSION_3_1 ||
		    dc->ctx->dce_version == DCN_VERSION_3_14 ||
		    dc->ctx->dce_version == DCN_VERSION_3_15 ||
		    dc->ctx->dce_version == DCN_VERSION_3_5 ||
		    dc->ctx->dce_version == DCN_VERSION_3_51 ||
		    dc->ctx->dce_version == DCN_VERSION_3_6 ||
		    dc->ctx->dce_version == DCN_VERSION_3_16)
			config->psp.caps.dtm_v3_supported = 1;
		config->ddc.handle = dc_get_link_at_index(dc, i);

		ddc_funcs->write_i2c = lp_write_i2c;
		ddc_funcs->read_i2c = lp_read_i2c;
		ddc_funcs->write_dpcd = lp_write_dpcd;
		ddc_funcs->read_dpcd = lp_read_dpcd;

		config->debug.lc_enable_sw_fallback = dc->debug.hdcp_lc_enable_sw_fallback;
		if (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable) {
			ddc_funcs->atomic_write_poll_read_i2c = lp_atomic_write_poll_read_i2c;
			ddc_funcs->atomic_write_poll_read_aux = lp_atomic_write_poll_read_aux;
		} else {
			ddc_funcs->atomic_write_poll_read_i2c = NULL;
			ddc_funcs->atomic_write_poll_read_aux = NULL;
		}

		memset(hdcp_work[i].aconnector, 0,
		       sizeof(struct amdgpu_dm_connector *) *
		       AMDGPU_DM_MAX_DISPLAY_INDEX);
		memset(hdcp_work[i].encryption_status, 0,
		       sizeof(enum mod_hdcp_encryption_status) *
		       AMDGPU_DM_MAX_DISPLAY_INDEX);
	}

	cp_psp->funcs.update_stream_config = update_config;
	cp_psp->funcs.enable_assr = enable_assr;
	cp_psp->handle = hdcp_work;

	/* File created at /sys/class/drm/card0/device/hdcp_srm */
	hdcp_work[0].attr = data_attr;
	sysfs_bin_attr_init(&hdcp_work[0].attr);

	if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr))
		DRM_WARN("Failed to create device file hdcp_srm");

	return hdcp_work;

fail_alloc_context:
	kfree(hdcp_work->srm);
	kfree(hdcp_work->srm_temp);
	kfree(hdcp_work);

	return NULL;
}