// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM cxl

#if !defined(_CXL_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _CXL_EVENTS_H

#include <linux/tracepoint.h>
#include <linux/pci.h>
#include <asm-generic/unaligned.h>

#include <cxl.h>
#include <cxlmem.h>
#include "core.h"

#define CXL_RAS_UC_CACHE_DATA_PARITY	BIT(0)
#define CXL_RAS_UC_CACHE_ADDR_PARITY	BIT(1)
#define CXL_RAS_UC_CACHE_BE_PARITY	BIT(2)
#define CXL_RAS_UC_CACHE_DATA_ECC	BIT(3)
#define CXL_RAS_UC_MEM_DATA_PARITY	BIT(4)
#define CXL_RAS_UC_MEM_ADDR_PARITY	BIT(5)
#define CXL_RAS_UC_MEM_BE_PARITY	BIT(6)
#define CXL_RAS_UC_MEM_DATA_ECC		BIT(7)
#define CXL_RAS_UC_REINIT_THRESH	BIT(8)
#define CXL_RAS_UC_RSVD_ENCODE		BIT(9)
#define CXL_RAS_UC_POISON		BIT(10)
#define CXL_RAS_UC_RECV_OVERFLOW	BIT(11)
#define CXL_RAS_UC_INTERNAL_ERR		BIT(14)
#define CXL_RAS_UC_IDE_TX_ERR		BIT(15)
#define CXL_RAS_UC_IDE_RX_ERR		BIT(16)

#define show_uc_errs(status)	__print_flags(status, " | ", \
	{ CXL_RAS_UC_CACHE_DATA_PARITY, "Cache Data Parity Error" }, \
	{ CXL_RAS_UC_CACHE_ADDR_PARITY, "Cache Address Parity Error" }, \
	{ CXL_RAS_UC_CACHE_BE_PARITY, "Cache Byte Enable Parity Error" }, \
	{ CXL_RAS_UC_CACHE_DATA_ECC, "Cache Data ECC Error" }, \
	{ CXL_RAS_UC_MEM_DATA_PARITY, "Memory Data Parity Error" }, \
	{ CXL_RAS_UC_MEM_ADDR_PARITY, "Memory Address Parity Error" }, \
	{ CXL_RAS_UC_MEM_BE_PARITY, "Memory Byte Enable Parity Error" }, \
	{ CXL_RAS_UC_MEM_DATA_ECC, "Memory Data ECC Error" }, \
	{ CXL_RAS_UC_REINIT_THRESH, "REINIT Threshold Hit" }, \
	{ CXL_RAS_UC_RSVD_ENCODE, "Received Unrecognized Encoding" }, \
	{ CXL_RAS_UC_POISON, "Received Poison From Peer" }, \
	{ CXL_RAS_UC_RECV_OVERFLOW, "Receiver Overflow" }, \
	{ CXL_RAS_UC_INTERNAL_ERR, "Component Specific Error" }, \
	{ CXL_RAS_UC_IDE_TX_ERR, "IDE Tx Error" }, \
	{ CXL_RAS_UC_IDE_RX_ERR, "IDE Rx Error" } \
)

TRACE_EVENT(cxl_aer_uncorrectable_error,
	TP_PROTO(const struct cxl_memdev *cxlmd, u32 status, u32 fe, u32 *hl),
	TP_ARGS(cxlmd, status, fe, hl),
	TP_STRUCT__entry(
		__string(memdev, dev_name(&cxlmd->dev))
		__string(host, dev_name(cxlmd->dev.parent))
		__field(u64, serial)
		__field(u32, status)
		__field(u32, first_error)
		__array(u32, header_log, CXL_HEADERLOG_SIZE_U32)
	),
	TP_fast_assign(
		__assign_str(memdev, dev_name(&cxlmd->dev));
		__assign_str(host, dev_name(cxlmd->dev.parent));
		__entry->serial = cxlmd->cxlds->serial;
		__entry->status = status;
		__entry->first_error = fe;
		/*
		 * Embed the 512B headerlog data for user app retrieval and
		 * parsing, but no need to print this in the trace buffer.
		 */
		memcpy(__entry->header_log, hl, CXL_HEADERLOG_SIZE);
	),
	TP_printk("memdev=%s host=%s serial=%lld: status: '%s' first_error: '%s'",
		__get_str(memdev), __get_str(host), __entry->serial,
		show_uc_errs(__entry->status),
		show_uc_errs(__entry->first_error)
	)
);
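/*
 * Illustrative usage sketch (not part of this header): TRACE_EVENT() above
 * generates trace_cxl_aer_uncorrectable_error(). A RAS handler that has read
 * the uncorrectable status, the first-error pointer, and the 512B header log
 * (the local variable names below are assumptions) might emit:
 *
 *	trace_cxl_aer_uncorrectable_error(cxlmd, status, first_error, header_log);
 */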

#define CXL_RAS_CE_CACHE_DATA_ECC	BIT(0)
#define CXL_RAS_CE_MEM_DATA_ECC		BIT(1)
#define CXL_RAS_CE_CRC_THRESH		BIT(2)
#define CXL_RAS_CE_RETRY_THRESH		BIT(3)
#define CXL_RAS_CE_CACHE_POISON		BIT(4)
#define CXL_RAS_CE_MEM_POISON		BIT(5)
#define CXL_RAS_CE_PHYS_LAYER_ERR	BIT(6)

#define show_ce_errs(status)	__print_flags(status, " | ", \
	{ CXL_RAS_CE_CACHE_DATA_ECC, "Cache Data ECC Error" }, \
	{ CXL_RAS_CE_MEM_DATA_ECC, "Memory Data ECC Error" }, \
	{ CXL_RAS_CE_CRC_THRESH, "CRC Threshold Hit" }, \
	{ CXL_RAS_CE_RETRY_THRESH, "Retry Threshold" }, \
	{ CXL_RAS_CE_CACHE_POISON, "Received Cache Poison From Peer" }, \
	{ CXL_RAS_CE_MEM_POISON, "Received Memory Poison From Peer" }, \
	{ CXL_RAS_CE_PHYS_LAYER_ERR, "Received Error From Physical Layer" } \
)

TRACE_EVENT(cxl_aer_correctable_error,
	TP_PROTO(const struct cxl_memdev *cxlmd, u32 status),
	TP_ARGS(cxlmd, status),
	TP_STRUCT__entry(
		__string(memdev, dev_name(&cxlmd->dev))
		__string(host, dev_name(cxlmd->dev.parent))
		__field(u64, serial)
		__field(u32, status)
	),
	TP_fast_assign(
		__assign_str(memdev, dev_name(&cxlmd->dev));
		__assign_str(host, dev_name(cxlmd->dev.parent));
		__entry->serial = cxlmd->cxlds->serial;
		__entry->status = status;
	),
	TP_printk("memdev=%s host=%s serial=%lld: status: '%s'",
		__get_str(memdev), __get_str(host), __entry->serial,
		show_ce_errs(__entry->status)
	)
);

#define cxl_event_log_type_str(type)				\
	__print_symbolic(type,					\
		{ CXL_EVENT_TYPE_INFO, "Informational" },	\
		{ CXL_EVENT_TYPE_WARN, "Warning" },		\
		{ CXL_EVENT_TYPE_FAIL, "Failure" },		\
		{ CXL_EVENT_TYPE_FATAL, "Fatal" })

TRACE_EVENT(cxl_overflow,

	TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
		 struct cxl_get_event_payload *payload),

	TP_ARGS(cxlmd, log, payload),

	TP_STRUCT__entry(
		__string(memdev, dev_name(&cxlmd->dev))
		__string(host, dev_name(cxlmd->dev.parent))
		__field(int, log)
		__field(u64, serial)
		__field(u64, first_ts)
		__field(u64, last_ts)
		__field(u16, count)
	),

	TP_fast_assign(
		__assign_str(memdev, dev_name(&cxlmd->dev));
		__assign_str(host, dev_name(cxlmd->dev.parent));
		__entry->serial = cxlmd->cxlds->serial;
		__entry->log = log;
		__entry->count = le16_to_cpu(payload->overflow_err_count);
		__entry->first_ts = le64_to_cpu(payload->first_overflow_timestamp);
		__entry->last_ts = le64_to_cpu(payload->last_overflow_timestamp);
	),

	TP_printk("memdev=%s host=%s serial=%lld: log=%s : %u records from %llu to %llu",
		__get_str(memdev), __get_str(host), __entry->serial,
		cxl_event_log_type_str(__entry->log), __entry->count,
		__entry->first_ts, __entry->last_ts)

);
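/*
 * Illustrative usage sketch (not part of this header): the Get Event Records
 * handler could report an overflowed log via the generated tracepoint, e.g.
 * (the overflow-flag check shown is an assumption; only the prototype above
 * is authoritative):
 *
 *	if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
 *		trace_cxl_overflow(cxlmd, log, payload);
 */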

/*
 * Common Event Record Format
 * CXL 3.0 section 8.2.9.2.1; Table 8-42
 */
#define CXL_EVENT_RECORD_FLAG_PERMANENT		BIT(2)
#define CXL_EVENT_RECORD_FLAG_MAINT_NEEDED	BIT(3)
#define CXL_EVENT_RECORD_FLAG_PERF_DEGRADED	BIT(4)
#define CXL_EVENT_RECORD_FLAG_HW_REPLACE	BIT(5)
#define show_hdr_flags(flags)	__print_flags(flags, " | ", \
	{ CXL_EVENT_RECORD_FLAG_PERMANENT, "PERMANENT_CONDITION" }, \
	{ CXL_EVENT_RECORD_FLAG_MAINT_NEEDED, "MAINTENANCE_NEEDED" }, \
	{ CXL_EVENT_RECORD_FLAG_PERF_DEGRADED, "PERFORMANCE_DEGRADED" }, \
	{ CXL_EVENT_RECORD_FLAG_HW_REPLACE, "HARDWARE_REPLACEMENT_NEEDED" } \
)

/*
 * Define macros for the common header of each CXL event.
 *
 * Tracepoints using these macros must do 3 things:
 *
 * 1) Add CXL_EVT_TP_entry to TP_STRUCT__entry
 * 2) Use CXL_EVT_TP_fast_assign within TP_fast_assign;
 *    pass the dev, log, and CXL event header
 * 3) Use CXL_EVT_TP_printk() instead of TP_printk()
 *
 * See the generic_event tracepoint as an example.
 */
#define CXL_EVT_TP_entry				\
	__string(memdev, dev_name(&cxlmd->dev))		\
	__string(host, dev_name(cxlmd->dev.parent))	\
	__field(int, log)				\
	__field_struct(uuid_t, hdr_uuid)		\
	__field(u64, serial)				\
	__field(u32, hdr_flags)				\
	__field(u16, hdr_handle)			\
	__field(u16, hdr_related_handle)		\
	__field(u64, hdr_timestamp)			\
	__field(u8, hdr_length)				\
	__field(u8, hdr_maint_op_class)

#define CXL_EVT_TP_fast_assign(cxlmd, l, hdr)					\
	__assign_str(memdev, dev_name(&(cxlmd)->dev));				\
	__assign_str(host, dev_name((cxlmd)->dev.parent));			\
	__entry->log = (l);							\
	__entry->serial = (cxlmd)->cxlds->serial;				\
	memcpy(&__entry->hdr_uuid, &(hdr).id, sizeof(uuid_t));			\
	__entry->hdr_length = (hdr).length;					\
	__entry->hdr_flags = get_unaligned_le24((hdr).flags);			\
	__entry->hdr_handle = le16_to_cpu((hdr).handle);			\
	__entry->hdr_related_handle = le16_to_cpu((hdr).related_handle);	\
	__entry->hdr_timestamp = le64_to_cpu((hdr).timestamp);			\
	__entry->hdr_maint_op_class = (hdr).maint_op_class

#define CXL_EVT_TP_printk(fmt, ...) \
	TP_printk("memdev=%s host=%s serial=%lld log=%s : time=%llu uuid=%pUb " \
		"len=%d flags='%s' handle=%x related_handle=%x " \
		"maint_op_class=%u : " fmt, \
		__get_str(memdev), __get_str(host), __entry->serial, \
		cxl_event_log_type_str(__entry->log), \
		__entry->hdr_timestamp, &__entry->hdr_uuid, __entry->hdr_length,\
		show_hdr_flags(__entry->hdr_flags), __entry->hdr_handle, \
		__entry->hdr_related_handle, __entry->hdr_maint_op_class, \
		##__VA_ARGS__)

TRACE_EVENT(cxl_generic_event,

	TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
		 struct cxl_event_record_raw *rec),

	TP_ARGS(cxlmd, log, rec),

	TP_STRUCT__entry(
		CXL_EVT_TP_entry
		__array(u8, data, CXL_EVENT_RECORD_DATA_LENGTH)
	),

	TP_fast_assign(
		CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
		memcpy(__entry->data, &rec->data, CXL_EVENT_RECORD_DATA_LENGTH);
	),

	CXL_EVT_TP_printk("%s",
		__print_hex(__entry->data, CXL_EVENT_RECORD_DATA_LENGTH))
);

/*
 * Physical Address field masks
 *
 * General Media Event Record
 * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
 *
 * DRAM Event Record
 * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
 */
#define CXL_DPA_FLAGS_MASK	0x3F
#define CXL_DPA_MASK		(~CXL_DPA_FLAGS_MASK)

#define CXL_DPA_VOLATILE	BIT(0)
#define CXL_DPA_NOT_REPAIRABLE	BIT(1)
#define show_dpa_flags(flags)	__print_flags(flags, "|", \
	{ CXL_DPA_VOLATILE, "VOLATILE" }, \
	{ CXL_DPA_NOT_REPAIRABLE, "NOT_REPAIRABLE" } \
)

/*
 * General Media Event Record - GMER
 * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
 */
#define CXL_GMER_EVT_DESC_UNCORRECTABLE_EVENT		BIT(0)
#define CXL_GMER_EVT_DESC_THRESHOLD_EVENT		BIT(1)
#define CXL_GMER_EVT_DESC_POISON_LIST_OVERFLOW		BIT(2)
#define show_event_desc_flags(flags)	__print_flags(flags, "|", \
	{ CXL_GMER_EVT_DESC_UNCORRECTABLE_EVENT, "UNCORRECTABLE_EVENT" }, \
	{ CXL_GMER_EVT_DESC_THRESHOLD_EVENT, "THRESHOLD_EVENT" }, \
	{ CXL_GMER_EVT_DESC_POISON_LIST_OVERFLOW, "POISON_LIST_OVERFLOW" } \
)

#define CXL_GMER_MEM_EVT_TYPE_ECC_ERROR			0x00
#define CXL_GMER_MEM_EVT_TYPE_INV_ADDR			0x01
#define CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR		0x02
#define show_mem_event_type(type)	__print_symbolic(type, \
	{ CXL_GMER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \
	{ CXL_GMER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \
	{ CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" } \
)

#define CXL_GMER_TRANS_UNKNOWN				0x00
#define CXL_GMER_TRANS_HOST_READ			0x01
#define CXL_GMER_TRANS_HOST_WRITE			0x02
#define CXL_GMER_TRANS_HOST_SCAN_MEDIA			0x03
#define CXL_GMER_TRANS_HOST_INJECT_POISON		0x04
#define CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB		0x05
#define CXL_GMER_TRANS_INTERNAL_MEDIA_MANAGEMENT	0x06
#define show_trans_type(type)	__print_symbolic(type, \
	{ CXL_GMER_TRANS_UNKNOWN, "Unknown" }, \
	{ CXL_GMER_TRANS_HOST_READ, "Host Read" }, \
	{ CXL_GMER_TRANS_HOST_WRITE, "Host Write" }, \
	{ CXL_GMER_TRANS_HOST_SCAN_MEDIA, "Host Scan Media" }, \
	{ CXL_GMER_TRANS_HOST_INJECT_POISON, "Host Inject Poison" }, \
	{ CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB, "Internal Media Scrub" }, \
	{ CXL_GMER_TRANS_INTERNAL_MEDIA_MANAGEMENT, "Internal Media Management" } \
)

#define CXL_GMER_VALID_CHANNEL		BIT(0)
#define CXL_GMER_VALID_RANK		BIT(1)
#define CXL_GMER_VALID_DEVICE		BIT(2)
#define CXL_GMER_VALID_COMPONENT	BIT(3)
#define show_valid_flags(flags)	__print_flags(flags, "|", \
	{ CXL_GMER_VALID_CHANNEL, "CHANNEL" }, \
	{ CXL_GMER_VALID_RANK, "RANK" }, \
	{ CXL_GMER_VALID_DEVICE, "DEVICE" }, \
	{ CXL_GMER_VALID_COMPONENT, "COMPONENT" } \
)
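/*
 * Worked example of the physical address split used by the tracepoints
 * below: the low 6 bits of the record's physical address carry flags and the
 * remainder is the DPA. For a raw phys_addr of 0x1f000003:
 *
 *	dpa_flags = 0x1f000003 & CXL_DPA_FLAGS_MASK = 0x3 -> "VOLATILE|NOT_REPAIRABLE"
 *	dpa       = 0x1f000003 & CXL_DPA_MASK       = 0x1f000000
 */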

TRACE_EVENT(cxl_general_media,

	TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
		 struct cxl_event_gen_media *rec),

	TP_ARGS(cxlmd, log, rec),

	TP_STRUCT__entry(
		CXL_EVT_TP_entry
		/* General Media */
		__field(u64, dpa)
		__field(u8, descriptor)
		__field(u8, type)
		__field(u8, transaction_type)
		__field(u8, channel)
		__field(u32, device)
		__array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE)
		__field(u16, validity_flags)
		/* Following are out of order to pack trace record */
		__field(u8, rank)
		__field(u8, dpa_flags)
	),

	TP_fast_assign(
		CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);

		/* General Media */
		__entry->dpa = le64_to_cpu(rec->phys_addr);
		__entry->dpa_flags = __entry->dpa & CXL_DPA_FLAGS_MASK;
		/* Mask after flags have been parsed */
		__entry->dpa &= CXL_DPA_MASK;
		__entry->descriptor = rec->descriptor;
		__entry->type = rec->type;
		__entry->transaction_type = rec->transaction_type;
		__entry->channel = rec->channel;
		__entry->rank = rec->rank;
		__entry->device = get_unaligned_le24(rec->device);
		memcpy(__entry->comp_id, &rec->component_id,
		       CXL_EVENT_GEN_MED_COMP_ID_SIZE);
		__entry->validity_flags = get_unaligned_le16(&rec->validity_flags);
	),

	CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' " \
		"descriptor='%s' type='%s' transaction_type='%s' channel=%u rank=%u " \
		"device=%x comp_id=%s validity_flags='%s'",
		__entry->dpa, show_dpa_flags(__entry->dpa_flags),
		show_event_desc_flags(__entry->descriptor),
		show_mem_event_type(__entry->type),
		show_trans_type(__entry->transaction_type),
		__entry->channel, __entry->rank, __entry->device,
		__print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
		show_valid_flags(__entry->validity_flags)
	)
);
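/*
 * Illustrative usage sketch (not part of this header): an event handler that
 * has identified a General Media Event Record (e.g. by its UUID) could emit
 * the generated tracepoint as:
 *
 *	trace_cxl_general_media(cxlmd, log, (struct cxl_event_gen_media *)record);
 *
 * where "record" is an assumed local holding the raw event record; only the
 * prototype above is authoritative.
 */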

/*
 * DRAM Event Record - DER
 *
 * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
 */
/*
 * DRAM Event Record defines many fields the same as the General Media Event
 * Record. Reuse those definitions as appropriate.
 */
#define CXL_DER_VALID_CHANNEL			BIT(0)
#define CXL_DER_VALID_RANK			BIT(1)
#define CXL_DER_VALID_NIBBLE			BIT(2)
#define CXL_DER_VALID_BANK_GROUP		BIT(3)
#define CXL_DER_VALID_BANK			BIT(4)
#define CXL_DER_VALID_ROW			BIT(5)
#define CXL_DER_VALID_COLUMN			BIT(6)
#define CXL_DER_VALID_CORRECTION_MASK		BIT(7)
#define show_dram_valid_flags(flags)	__print_flags(flags, "|", \
	{ CXL_DER_VALID_CHANNEL, "CHANNEL" }, \
	{ CXL_DER_VALID_RANK, "RANK" }, \
	{ CXL_DER_VALID_NIBBLE, "NIBBLE" }, \
	{ CXL_DER_VALID_BANK_GROUP, "BANK GROUP" }, \
	{ CXL_DER_VALID_BANK, "BANK" }, \
	{ CXL_DER_VALID_ROW, "ROW" }, \
	{ CXL_DER_VALID_COLUMN, "COLUMN" }, \
	{ CXL_DER_VALID_CORRECTION_MASK, "CORRECTION MASK" } \
)

TRACE_EVENT(cxl_dram,

	TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
		 struct cxl_event_dram *rec),

	TP_ARGS(cxlmd, log, rec),

	TP_STRUCT__entry(
		CXL_EVT_TP_entry
		/* DRAM */
		__field(u64, dpa)
		__field(u8, descriptor)
		__field(u8, type)
		__field(u8, transaction_type)
		__field(u8, channel)
		__field(u16, validity_flags)
		__field(u16, column)	/* Out of order to pack trace record */
		__field(u32, nibble_mask)
		__field(u32, row)
		__array(u8, cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE)
		__field(u8, rank)	/* Out of order to pack trace record */
		__field(u8, bank_group)	/* Out of order to pack trace record */
		__field(u8, bank)	/* Out of order to pack trace record */
		__field(u8, dpa_flags)	/* Out of order to pack trace record */
	),

	TP_fast_assign(
		CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);

		/* DRAM */
		__entry->dpa = le64_to_cpu(rec->phys_addr);
		__entry->dpa_flags = __entry->dpa & CXL_DPA_FLAGS_MASK;
		__entry->dpa &= CXL_DPA_MASK;
		__entry->descriptor = rec->descriptor;
		__entry->type = rec->type;
		__entry->transaction_type = rec->transaction_type;
		__entry->validity_flags = get_unaligned_le16(rec->validity_flags);
		__entry->channel = rec->channel;
		__entry->rank = rec->rank;
		__entry->nibble_mask = get_unaligned_le24(rec->nibble_mask);
		__entry->bank_group = rec->bank_group;
		__entry->bank = rec->bank;
		__entry->row = get_unaligned_le24(rec->row);
		__entry->column = get_unaligned_le16(rec->column);
		memcpy(__entry->cor_mask, &rec->correction_mask,
		       CXL_EVENT_DER_CORRECTION_MASK_SIZE);
	),

	CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' " \
		"transaction_type='%s' channel=%u rank=%u nibble_mask=%x " \
		"bank_group=%u bank=%u row=%u column=%u cor_mask=%s " \
		"validity_flags='%s'",
		__entry->dpa, show_dpa_flags(__entry->dpa_flags),
		show_event_desc_flags(__entry->descriptor),
		show_mem_event_type(__entry->type),
		show_trans_type(__entry->transaction_type),
		__entry->channel, __entry->rank, __entry->nibble_mask,
		__entry->bank_group, __entry->bank,
		__entry->row, __entry->column,
		__print_hex(__entry->cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE),
		show_dram_valid_flags(__entry->validity_flags)
	)
);
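/*
 * Note on the 3-byte fields above: device, nibble_mask, and row are carried
 * as 24-bit little-endian values in the records, so get_unaligned_le24() is
 * used to widen them. For example, raw bytes { 0x56, 0x34, 0x12 } decode to
 * 0x123456.
 */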

/*
 * Memory Module Event Record - MMER
 *
 * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
 */
#define CXL_MMER_HEALTH_STATUS_CHANGE		0x00
#define CXL_MMER_MEDIA_STATUS_CHANGE		0x01
#define CXL_MMER_LIFE_USED_CHANGE		0x02
#define CXL_MMER_TEMP_CHANGE			0x03
#define CXL_MMER_DATA_PATH_ERROR		0x04
#define CXL_MMER_LSA_ERROR			0x05
#define show_dev_evt_type(type)	__print_symbolic(type, \
	{ CXL_MMER_HEALTH_STATUS_CHANGE, "Health Status Change" }, \
	{ CXL_MMER_MEDIA_STATUS_CHANGE, "Media Status Change" }, \
	{ CXL_MMER_LIFE_USED_CHANGE, "Life Used Change" }, \
	{ CXL_MMER_TEMP_CHANGE, "Temperature Change" }, \
	{ CXL_MMER_DATA_PATH_ERROR, "Data Path Error" }, \
	{ CXL_MMER_LSA_ERROR, "LSA Error" } \
)

/*
 * Device Health Information - DHI
 *
 * CXL rev 3.0 section 8.2.9.8.3.1; Table 8-100
 */
#define CXL_DHI_HS_MAINTENANCE_NEEDED		BIT(0)
#define CXL_DHI_HS_PERFORMANCE_DEGRADED		BIT(1)
#define CXL_DHI_HS_HW_REPLACEMENT_NEEDED	BIT(2)
#define show_health_status_flags(flags)	__print_flags(flags, "|", \
	{ CXL_DHI_HS_MAINTENANCE_NEEDED, "MAINTENANCE_NEEDED" }, \
	{ CXL_DHI_HS_PERFORMANCE_DEGRADED, "PERFORMANCE_DEGRADED" }, \
	{ CXL_DHI_HS_HW_REPLACEMENT_NEEDED, "REPLACEMENT_NEEDED" } \
)

#define CXL_DHI_MS_NORMAL					0x00
#define CXL_DHI_MS_NOT_READY					0x01
#define CXL_DHI_MS_WRITE_PERSISTENCY_LOST			0x02
#define CXL_DHI_MS_ALL_DATA_LOST				0x03
#define CXL_DHI_MS_WRITE_PERSISTENCY_LOSS_EVENT_POWER_LOSS	0x04
#define CXL_DHI_MS_WRITE_PERSISTENCY_LOSS_EVENT_SHUTDOWN	0x05
#define CXL_DHI_MS_WRITE_PERSISTENCY_LOSS_IMMINENT		0x06
#define CXL_DHI_MS_WRITE_ALL_DATA_LOSS_EVENT_POWER_LOSS		0x07
#define CXL_DHI_MS_WRITE_ALL_DATA_LOSS_EVENT_SHUTDOWN		0x08
#define CXL_DHI_MS_WRITE_ALL_DATA_LOSS_IMMINENT			0x09
#define show_media_status(ms)	__print_symbolic(ms, \
	{ CXL_DHI_MS_NORMAL, \
		"Normal" }, \
	{ CXL_DHI_MS_NOT_READY, \
		"Not Ready" }, \
	{ CXL_DHI_MS_WRITE_PERSISTENCY_LOST, \
		"Write Persistency Lost" }, \
	{ CXL_DHI_MS_ALL_DATA_LOST, \
		"All Data Lost" }, \
	{ CXL_DHI_MS_WRITE_PERSISTENCY_LOSS_EVENT_POWER_LOSS, \
		"Write Persistency Loss in the Event of Power Loss" }, \
	{ CXL_DHI_MS_WRITE_PERSISTENCY_LOSS_EVENT_SHUTDOWN, \
		"Write Persistency Loss in Event of Shutdown" }, \
	{ CXL_DHI_MS_WRITE_PERSISTENCY_LOSS_IMMINENT, \
		"Write Persistency Loss Imminent" }, \
	{ CXL_DHI_MS_WRITE_ALL_DATA_LOSS_EVENT_POWER_LOSS, \
		"All Data Loss in Event of Power Loss" }, \
	{ CXL_DHI_MS_WRITE_ALL_DATA_LOSS_EVENT_SHUTDOWN, \
		"All Data loss in the Event of Shutdown" }, \
	{ CXL_DHI_MS_WRITE_ALL_DATA_LOSS_IMMINENT, \
		"All Data Loss Imminent" } \
)

#define CXL_DHI_AS_NORMAL	0x0
#define CXL_DHI_AS_WARNING	0x1
#define CXL_DHI_AS_CRITICAL	0x2
#define show_two_bit_status(as) __print_symbolic(as, \
	{ CXL_DHI_AS_NORMAL, "Normal" }, \
	{ CXL_DHI_AS_WARNING, "Warning" }, \
	{ CXL_DHI_AS_CRITICAL, "Critical" } \
)
#define show_one_bit_status(as) __print_symbolic(as, \
	{ CXL_DHI_AS_NORMAL, "Normal" }, \
	{ CXL_DHI_AS_WARNING, "Warning" } \
)

#define CXL_DHI_AS_LIFE_USED(as)		(as & 0x3)
#define CXL_DHI_AS_DEV_TEMP(as)			((as & 0xC) >> 2)
#define CXL_DHI_AS_COR_VOL_ERR_CNT(as)		((as & 0x10) >> 4)
#define CXL_DHI_AS_COR_PER_ERR_CNT(as)		((as & 0x20) >> 5)
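/*
 * Worked example of the additional-status decode below: for add_status = 0x16
 * (0b010110):
 *
 *	CXL_DHI_AS_LIFE_USED(0x16)       = 0x2 -> "Critical"
 *	CXL_DHI_AS_DEV_TEMP(0x16)        = 0x1 -> "Warning"
 *	CXL_DHI_AS_COR_VOL_ERR_CNT(0x16) = 0x1 -> "Warning"
 *	CXL_DHI_AS_COR_PER_ERR_CNT(0x16) = 0x0 -> "Normal"
 */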

TRACE_EVENT(cxl_memory_module,

	TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
		 struct cxl_event_mem_module *rec),

	TP_ARGS(cxlmd, log, rec),

	TP_STRUCT__entry(
		CXL_EVT_TP_entry

		/* Memory Module Event */
		__field(u8, event_type)

		/* Device Health Info */
		__field(u8, health_status)
		__field(u8, media_status)
		__field(u8, life_used)
		__field(u32, dirty_shutdown_cnt)
		__field(u32, cor_vol_err_cnt)
		__field(u32, cor_per_err_cnt)
		__field(s16, device_temp)
		__field(u8, add_status)
	),

	TP_fast_assign(
		CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);

		/* Memory Module Event */
		__entry->event_type = rec->event_type;

		/* Device Health Info */
		__entry->health_status = rec->info.health_status;
		__entry->media_status = rec->info.media_status;
		__entry->life_used = rec->info.life_used;
		__entry->dirty_shutdown_cnt = get_unaligned_le32(rec->info.dirty_shutdown_cnt);
		__entry->cor_vol_err_cnt = get_unaligned_le32(rec->info.cor_vol_err_cnt);
		__entry->cor_per_err_cnt = get_unaligned_le32(rec->info.cor_per_err_cnt);
		__entry->device_temp = get_unaligned_le16(rec->info.device_temp);
		__entry->add_status = rec->info.add_status;
	),

	CXL_EVT_TP_printk("event_type='%s' health_status='%s' media_status='%s' " \
		"as_life_used=%s as_dev_temp=%s as_cor_vol_err_cnt=%s " \
		"as_cor_per_err_cnt=%s life_used=%u device_temp=%d " \
		"dirty_shutdown_cnt=%u cor_vol_err_cnt=%u cor_per_err_cnt=%u",
		show_dev_evt_type(__entry->event_type),
		show_health_status_flags(__entry->health_status),
		show_media_status(__entry->media_status),
		show_two_bit_status(CXL_DHI_AS_LIFE_USED(__entry->add_status)),
		show_two_bit_status(CXL_DHI_AS_DEV_TEMP(__entry->add_status)),
		show_one_bit_status(CXL_DHI_AS_COR_VOL_ERR_CNT(__entry->add_status)),
		show_one_bit_status(CXL_DHI_AS_COR_PER_ERR_CNT(__entry->add_status)),
		__entry->life_used, __entry->device_temp,
		__entry->dirty_shutdown_cnt, __entry->cor_vol_err_cnt,
		__entry->cor_per_err_cnt
	)
);

#define show_poison_trace_type(type)			\
	__print_symbolic(type,				\
	{ CXL_POISON_TRACE_LIST, "List" },		\
	{ CXL_POISON_TRACE_INJECT, "Inject" },		\
	{ CXL_POISON_TRACE_CLEAR, "Clear" })

#define __show_poison_source(source)			  \
	__print_symbolic(source,			  \
		{ CXL_POISON_SOURCE_UNKNOWN, "Unknown" }, \
		{ CXL_POISON_SOURCE_EXTERNAL, "External" }, \
		{ CXL_POISON_SOURCE_INTERNAL, "Internal" }, \
		{ CXL_POISON_SOURCE_INJECTED, "Injected" }, \
		{ CXL_POISON_SOURCE_VENDOR, "Vendor" })

#define show_poison_source(source)			\
	(((source > CXL_POISON_SOURCE_INJECTED) &&	\
	  (source != CXL_POISON_SOURCE_VENDOR)) ? "Reserved" \
	 : __show_poison_source(source))

#define show_poison_flags(flags)			\
	__print_flags(flags, "|",			\
		{ CXL_POISON_FLAG_MORE, "More" },	\
		{ CXL_POISON_FLAG_OVERFLOW, "Overflow" }, \
		{ CXL_POISON_FLAG_SCANNING, "Scanning" })

#define __cxl_poison_addr(record)			\
	(le64_to_cpu(record->address))
#define cxl_poison_record_dpa(record)			\
	(__cxl_poison_addr(record) & CXL_POISON_START_MASK)
#define cxl_poison_record_source(record)		\
	(__cxl_poison_addr(record) & CXL_POISON_SOURCE_MASK)
#define cxl_poison_record_dpa_length(record)		\
	(le32_to_cpu(record->length) * CXL_POISON_LEN_MULT)
#define cxl_poison_overflow(flags, time)		\
	(flags & CXL_POISON_FLAG_OVERFLOW ? le64_to_cpu(time) : 0)

u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *memdev, u64 dpa);

TRACE_EVENT(cxl_poison,

	TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *region,
		 const struct cxl_poison_record *record, u8 flags,
		 __le64 overflow_ts, enum cxl_poison_trace_type trace_type),

	TP_ARGS(cxlmd, region, record, flags, overflow_ts, trace_type),

	TP_STRUCT__entry(
		__string(memdev, dev_name(&cxlmd->dev))
		__string(host, dev_name(cxlmd->dev.parent))
		__field(u64, serial)
		__field(u8, trace_type)
		__string(region, region)
		__field(u64, overflow_ts)
		__field(u64, hpa)
		__field(u64, dpa)
		__field(u32, dpa_length)
		__array(char, uuid, 16)
		__field(u8, source)
		__field(u8, flags)
	),

	TP_fast_assign(
		__assign_str(memdev, dev_name(&cxlmd->dev));
		__assign_str(host, dev_name(cxlmd->dev.parent));
		__entry->serial = cxlmd->cxlds->serial;
		__entry->overflow_ts = cxl_poison_overflow(flags, overflow_ts);
		__entry->dpa = cxl_poison_record_dpa(record);
		__entry->dpa_length = cxl_poison_record_dpa_length(record);
		__entry->source = cxl_poison_record_source(record);
		__entry->trace_type = trace_type;
		__entry->flags = flags;
		if (region) {
			__assign_str(region, dev_name(&region->dev));
			memcpy(__entry->uuid, &region->params.uuid, 16);
			__entry->hpa = cxl_trace_hpa(region, cxlmd,
						     __entry->dpa);
		} else {
			__assign_str(region, "");
			memset(__entry->uuid, 0, 16);
			__entry->hpa = ULLONG_MAX;
		}
	),

	TP_printk("memdev=%s host=%s serial=%lld trace_type=%s region=%s " \
		"region_uuid=%pU hpa=0x%llx dpa=0x%llx dpa_length=0x%x " \
		"source=%s flags=%s overflow_time=%llu",
		__get_str(memdev),
		__get_str(host),
		__entry->serial,
		show_poison_trace_type(__entry->trace_type),
		__get_str(region),
		__entry->uuid,
		__entry->hpa,
		__entry->dpa,
		__entry->dpa_length,
		show_poison_source(__entry->source),
		show_poison_flags(__entry->flags),
		__entry->overflow_ts
	)
);

#endif /* _CXL_EVENTS_H */

#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>