// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM cxl

#if !defined(_CXL_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _CXL_EVENTS_H

#include <linux/tracepoint.h>
#include <linux/pci.h>
#include <linux/unaligned.h>

#include <cxl.h>
#include <cxlmem.h>
#include "core.h"

#define CXL_RAS_UC_CACHE_DATA_PARITY	BIT(0)
#define CXL_RAS_UC_CACHE_ADDR_PARITY	BIT(1)
#define CXL_RAS_UC_CACHE_BE_PARITY	BIT(2)
#define CXL_RAS_UC_CACHE_DATA_ECC	BIT(3)
#define CXL_RAS_UC_MEM_DATA_PARITY	BIT(4)
#define CXL_RAS_UC_MEM_ADDR_PARITY	BIT(5)
#define CXL_RAS_UC_MEM_BE_PARITY	BIT(6)
#define CXL_RAS_UC_MEM_DATA_ECC	BIT(7)
#define CXL_RAS_UC_REINIT_THRESH	BIT(8)
#define CXL_RAS_UC_RSVD_ENCODE	BIT(9)
#define CXL_RAS_UC_POISON	BIT(10)
#define CXL_RAS_UC_RECV_OVERFLOW	BIT(11)
#define CXL_RAS_UC_INTERNAL_ERR	BIT(14)
#define CXL_RAS_UC_IDE_TX_ERR	BIT(15)
#define CXL_RAS_UC_IDE_RX_ERR	BIT(16)

#define show_uc_errs(status)	__print_flags(status, " | ", \
	{ CXL_RAS_UC_CACHE_DATA_PARITY, "Cache Data Parity Error" }, \
	{ CXL_RAS_UC_CACHE_ADDR_PARITY, "Cache Address Parity Error" }, \
	{ CXL_RAS_UC_CACHE_BE_PARITY, "Cache Byte Enable Parity Error" }, \
	{ CXL_RAS_UC_CACHE_DATA_ECC, "Cache Data ECC Error" }, \
	{ CXL_RAS_UC_MEM_DATA_PARITY, "Memory Data Parity Error" }, \
	{ CXL_RAS_UC_MEM_ADDR_PARITY, "Memory Address Parity Error" }, \
	{ CXL_RAS_UC_MEM_BE_PARITY, "Memory Byte Enable Parity Error" }, \
	{ CXL_RAS_UC_MEM_DATA_ECC, "Memory Data ECC Error" }, \
	{ CXL_RAS_UC_REINIT_THRESH, "REINIT Threshold Hit" }, \
	{ CXL_RAS_UC_RSVD_ENCODE, "Received Unrecognized Encoding" }, \
	{ CXL_RAS_UC_POISON, "Received Poison From Peer" }, \
	{ CXL_RAS_UC_RECV_OVERFLOW, "Receiver Overflow" }, \
	{ CXL_RAS_UC_INTERNAL_ERR, "Component Specific Error" }, \
	{ CXL_RAS_UC_IDE_TX_ERR, "IDE Tx Error" }, \
	{ CXL_RAS_UC_IDE_RX_ERR, "IDE Rx Error" } \
)

TRACE_EVENT(cxl_aer_uncorrectable_error,
	TP_PROTO(const struct cxl_memdev *cxlmd, u32 status, u32 fe, u32 *hl),
	TP_ARGS(cxlmd, status, fe, hl),
	TP_STRUCT__entry(
		__string(memdev, dev_name(&cxlmd->dev))
		__string(host, dev_name(cxlmd->dev.parent))
		__field(u64, serial)
		__field(u32, status)
		__field(u32, first_error)
		__array(u32, header_log, CXL_HEADERLOG_SIZE_U32)
	),
	TP_fast_assign(
		__assign_str(memdev);
		__assign_str(host);
		__entry->serial = cxlmd->cxlds->serial;
		__entry->status = status;
		__entry->first_error = fe;
		/*
		 * Embed the 512B headerlog data for user app retrieval and
		 * parsing, but no need to print this in the trace buffer.
		 */
		memcpy(__entry->header_log, hl, CXL_HEADERLOG_SIZE);
	),
	TP_printk("memdev=%s host=%s serial=%lld: status: '%s' first_error: '%s'",
		  __get_str(memdev), __get_str(host), __entry->serial,
		  show_uc_errs(__entry->status),
		  show_uc_errs(__entry->first_error)
	)
);

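/*
 * Correctable (CE) error status bits, mirroring the Correctable Error
 * Status register in the CXL RAS capability structure.
 */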
#define CXL_RAS_CE_CACHE_DATA_ECC	BIT(0)
#define CXL_RAS_CE_MEM_DATA_ECC	BIT(1)
#define CXL_RAS_CE_CRC_THRESH	BIT(2)
#define CXL_RAS_CE_RETRY_THRESH	BIT(3)
#define CXL_RAS_CE_CACHE_POISON	BIT(4)
#define CXL_RAS_CE_MEM_POISON	BIT(5)
#define CXL_RAS_CE_PHYS_LAYER_ERR	BIT(6)

#define show_ce_errs(status)	__print_flags(status, " | ", \
	{ CXL_RAS_CE_CACHE_DATA_ECC, "Cache Data ECC Error" }, \
	{ CXL_RAS_CE_MEM_DATA_ECC, "Memory Data ECC Error" }, \
	{ CXL_RAS_CE_CRC_THRESH, "CRC Threshold Hit" }, \
	{ CXL_RAS_CE_RETRY_THRESH, "Retry Threshold" }, \
	{ CXL_RAS_CE_CACHE_POISON, "Received Cache Poison From Peer" }, \
	{ CXL_RAS_CE_MEM_POISON, "Received Memory Poison From Peer" }, \
	{ CXL_RAS_CE_PHYS_LAYER_ERR, "Received Error From Physical Layer" } \
)

TRACE_EVENT(cxl_aer_correctable_error,
	TP_PROTO(const struct cxl_memdev *cxlmd, u32 status),
	TP_ARGS(cxlmd, status),
	TP_STRUCT__entry(
		__string(memdev, dev_name(&cxlmd->dev))
		__string(host, dev_name(cxlmd->dev.parent))
		__field(u64, serial)
		__field(u32, status)
	),
	TP_fast_assign(
		__assign_str(memdev);
		__assign_str(host);
		__entry->serial = cxlmd->cxlds->serial;
		__entry->status = status;
	),
	TP_printk("memdev=%s host=%s serial=%lld: status: '%s'",
		  __get_str(memdev), __get_str(host), __entry->serial,
		  show_ce_errs(__entry->status)
	)
);

#define cxl_event_log_type_str(type) \
	__print_symbolic(type, \
		{ CXL_EVENT_TYPE_INFO, "Informational" }, \
		{ CXL_EVENT_TYPE_WARN, "Warning" }, \
		{ CXL_EVENT_TYPE_FAIL, "Failure" }, \
		{ CXL_EVENT_TYPE_FATAL, "Fatal" })

TRACE_EVENT(cxl_overflow,

	TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
		 struct cxl_get_event_payload *payload),

	TP_ARGS(cxlmd, log, payload),

	TP_STRUCT__entry(
		__string(memdev, dev_name(&cxlmd->dev))
		__string(host, dev_name(cxlmd->dev.parent))
		__field(int, log)
		__field(u64, serial)
		__field(u64, first_ts)
		__field(u64, last_ts)
		__field(u16, count)
	),

	TP_fast_assign(
		__assign_str(memdev);
		__assign_str(host);
		__entry->serial = cxlmd->cxlds->serial;
		__entry->log = log;
		__entry->count = le16_to_cpu(payload->overflow_err_count);
		__entry->first_ts = le64_to_cpu(payload->first_overflow_timestamp);
		__entry->last_ts = le64_to_cpu(payload->last_overflow_timestamp);
	),

	TP_printk("memdev=%s host=%s serial=%lld: log=%s : %u records from %llu to %llu",
		  __get_str(memdev), __get_str(host), __entry->serial,
		  cxl_event_log_type_str(__entry->log), __entry->count,
		  __entry->first_ts, __entry->last_ts)

);

/*
 * Common Event Record Format
 * CXL 3.0 section 8.2.9.2.1; Table 8-42
 */
#define CXL_EVENT_RECORD_FLAG_PERMANENT	BIT(2)
#define CXL_EVENT_RECORD_FLAG_MAINT_NEEDED	BIT(3)
#define CXL_EVENT_RECORD_FLAG_PERF_DEGRADED	BIT(4)
#define CXL_EVENT_RECORD_FLAG_HW_REPLACE	BIT(5)
#define show_hdr_flags(flags)	__print_flags(flags, " | ", \
	{ CXL_EVENT_RECORD_FLAG_PERMANENT, "PERMANENT_CONDITION" }, \
	{ CXL_EVENT_RECORD_FLAG_MAINT_NEEDED, "MAINTENANCE_NEEDED" }, \
	{ CXL_EVENT_RECORD_FLAG_PERF_DEGRADED, "PERFORMANCE_DEGRADED" }, \
	{ CXL_EVENT_RECORD_FLAG_HW_REPLACE, "HARDWARE_REPLACEMENT_NEEDED" } \
)

/*
 * Define macros for the common header of each CXL event.
 *
 * Tracepoints using these macros must do 3 things:
 *
 * 1) Add CXL_EVT_TP_entry to TP_STRUCT__entry
 * 2) Use CXL_EVT_TP_fast_assign within TP_fast_assign;
 *    pass the dev, log, and CXL event header
 *    NOTE: The uuid must be assigned by the specific trace event
 * 3) Use CXL_EVT_TP_printk() instead of TP_printk()
 *
 * See the generic_event tracepoint as an example.
 */
#define CXL_EVT_TP_entry \
	__string(memdev, dev_name(&cxlmd->dev)) \
	__string(host, dev_name(cxlmd->dev.parent)) \
	__field(int, log) \
	__field_struct(uuid_t, hdr_uuid) \
	__field(u64, serial) \
	__field(u32, hdr_flags) \
	__field(u16, hdr_handle) \
	__field(u16, hdr_related_handle) \
	__field(u64, hdr_timestamp) \
	__field(u8, hdr_length) \
	__field(u8, hdr_maint_op_class)

#define CXL_EVT_TP_fast_assign(cxlmd, l, hdr) \
	__assign_str(memdev); \
	__assign_str(host); \
	__entry->log = (l); \
	__entry->serial = (cxlmd)->cxlds->serial; \
	__entry->hdr_length = (hdr).length; \
	__entry->hdr_flags = get_unaligned_le24((hdr).flags); \
	__entry->hdr_handle = le16_to_cpu((hdr).handle); \
	__entry->hdr_related_handle = le16_to_cpu((hdr).related_handle); \
	__entry->hdr_timestamp = le64_to_cpu((hdr).timestamp); \
	__entry->hdr_maint_op_class = (hdr).maint_op_class

#define CXL_EVT_TP_printk(fmt, ...) \
	TP_printk("memdev=%s host=%s serial=%lld log=%s : time=%llu uuid=%pUb " \
		"len=%d flags='%s' handle=%x related_handle=%x " \
		"maint_op_class=%u : " fmt, \
		__get_str(memdev), __get_str(host), __entry->serial, \
		cxl_event_log_type_str(__entry->log), \
		__entry->hdr_timestamp, &__entry->hdr_uuid, __entry->hdr_length, \
		show_hdr_flags(__entry->hdr_flags), __entry->hdr_handle, \
		__entry->hdr_related_handle, __entry->hdr_maint_op_class, \
		##__VA_ARGS__)

TRACE_EVENT(cxl_generic_event,

	TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
		 const uuid_t *uuid, struct cxl_event_generic *gen_rec),

	TP_ARGS(cxlmd, log, uuid, gen_rec),

	TP_STRUCT__entry(
		CXL_EVT_TP_entry
		__array(u8, data, CXL_EVENT_RECORD_DATA_LENGTH)
	),

	TP_fast_assign(
		CXL_EVT_TP_fast_assign(cxlmd, log, gen_rec->hdr);
		memcpy(&__entry->hdr_uuid, uuid, sizeof(uuid_t));
		memcpy(__entry->data, gen_rec->data, CXL_EVENT_RECORD_DATA_LENGTH);
	),

	CXL_EVT_TP_printk("%s",
		__print_hex(__entry->data, CXL_EVENT_RECORD_DATA_LENGTH))
);

/*
 * Physical Address field masks
 *
 * General Media Event Record
 * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
 *
 * DRAM Event Record
 * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
 */
#define CXL_DPA_FLAGS_MASK	GENMASK(1, 0)
#define CXL_DPA_MASK	GENMASK_ULL(63, 6)

#define CXL_DPA_VOLATILE	BIT(0)
#define CXL_DPA_NOT_REPAIRABLE	BIT(1)
#define show_dpa_flags(flags)	__print_flags(flags, "|", \
	{ CXL_DPA_VOLATILE, "VOLATILE" }, \
	{ CXL_DPA_NOT_REPAIRABLE, "NOT_REPAIRABLE" } \
)

/*
 * General Media Event Record - GMER
 * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
 */
#define CXL_GMER_EVT_DESC_UNCORRECTABLE_EVENT	BIT(0)
#define CXL_GMER_EVT_DESC_THRESHOLD_EVENT	BIT(1)
#define CXL_GMER_EVT_DESC_POISON_LIST_OVERFLOW	BIT(2)
#define show_event_desc_flags(flags)	__print_flags(flags, "|", \
	{ CXL_GMER_EVT_DESC_UNCORRECTABLE_EVENT, "UNCORRECTABLE_EVENT" }, \
	{ CXL_GMER_EVT_DESC_THRESHOLD_EVENT, "THRESHOLD_EVENT" }, \
	{ CXL_GMER_EVT_DESC_POISON_LIST_OVERFLOW, "POISON_LIST_OVERFLOW" } \
)

#define CXL_GMER_MEM_EVT_TYPE_ECC_ERROR	0x00
#define CXL_GMER_MEM_EVT_TYPE_INV_ADDR	0x01
#define CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR	0x02
#define show_mem_event_type(type)	__print_symbolic(type, \
	{ CXL_GMER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \
	{ CXL_GMER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \
	{ CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" } \
)

#define CXL_GMER_TRANS_UNKNOWN	0x00
#define CXL_GMER_TRANS_HOST_READ	0x01
#define CXL_GMER_TRANS_HOST_WRITE	0x02
#define CXL_GMER_TRANS_HOST_SCAN_MEDIA	0x03
#define CXL_GMER_TRANS_HOST_INJECT_POISON	0x04
#define CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB	0x05
#define CXL_GMER_TRANS_INTERNAL_MEDIA_MANAGEMENT	0x06
#define show_trans_type(type)	__print_symbolic(type, \
	{ CXL_GMER_TRANS_UNKNOWN, "Unknown" }, \
	{ CXL_GMER_TRANS_HOST_READ, "Host Read" }, \
	{ CXL_GMER_TRANS_HOST_WRITE, "Host Write" }, \
	{ CXL_GMER_TRANS_HOST_SCAN_MEDIA, "Host Scan Media" }, \
	{ CXL_GMER_TRANS_HOST_INJECT_POISON, "Host Inject Poison" }, \
	{ CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB, "Internal Media Scrub" }, \
	{ CXL_GMER_TRANS_INTERNAL_MEDIA_MANAGEMENT, "Internal Media Management" } \
)

#define CXL_GMER_VALID_CHANNEL	BIT(0)
#define CXL_GMER_VALID_RANK	BIT(1)
#define CXL_GMER_VALID_DEVICE	BIT(2)
#define CXL_GMER_VALID_COMPONENT	BIT(3)
#define show_valid_flags(flags)	__print_flags(flags, "|", \
	{ CXL_GMER_VALID_CHANNEL, "CHANNEL" }, \
	{ CXL_GMER_VALID_RANK, "RANK" }, \
	{ CXL_GMER_VALID_DEVICE, "DEVICE" }, \
	{ CXL_GMER_VALID_COMPONENT, "COMPONENT" } \
)

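/*
 * For the cxl_general_media, cxl_dram, and cxl_poison events below, the
 * region fields are only populated when a region (cxlr) is associated
 * with the event; otherwise the region name is empty and the uuid is
 * zeroed.
 */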
TRACE_EVENT(cxl_general_media,

	TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
		 struct cxl_region *cxlr, u64 hpa, struct cxl_event_gen_media *rec),

	TP_ARGS(cxlmd, log, cxlr, hpa, rec),

	TP_STRUCT__entry(
		CXL_EVT_TP_entry
		/* General Media */
		__field(u64, dpa)
		__field(u8, descriptor)
		__field(u8, type)
		__field(u8, transaction_type)
		__field(u8, channel)
		__field(u32, device)
		__array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE)
		/* Following are out of order to pack trace record */
		__field(u64, hpa)
		__field_struct(uuid_t, region_uuid)
		__field(u16, validity_flags)
		__field(u8, rank)
		__field(u8, dpa_flags)
		__string(region_name, cxlr ? dev_name(&cxlr->dev) : "")
	),

	TP_fast_assign(
		CXL_EVT_TP_fast_assign(cxlmd, log, rec->media_hdr.hdr);
		__entry->hdr_uuid = CXL_EVENT_GEN_MEDIA_UUID;

		/* General Media */
		__entry->dpa = le64_to_cpu(rec->media_hdr.phys_addr);
		__entry->dpa_flags = __entry->dpa & CXL_DPA_FLAGS_MASK;
		/* Mask after flags have been parsed */
		__entry->dpa &= CXL_DPA_MASK;
		__entry->descriptor = rec->media_hdr.descriptor;
		__entry->type = rec->media_hdr.type;
		__entry->transaction_type = rec->media_hdr.transaction_type;
		__entry->channel = rec->media_hdr.channel;
		__entry->rank = rec->media_hdr.rank;
		__entry->device = get_unaligned_le24(rec->device);
		memcpy(__entry->comp_id, &rec->component_id,
		       CXL_EVENT_GEN_MED_COMP_ID_SIZE);
		__entry->validity_flags = get_unaligned_le16(&rec->media_hdr.validity_flags);
		__entry->hpa = hpa;
		if (cxlr) {
			__assign_str(region_name);
			uuid_copy(&__entry->region_uuid, &cxlr->params.uuid);
		} else {
			__assign_str(region_name);
			uuid_copy(&__entry->region_uuid, &uuid_null);
		}
	),

	CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' " \
		"descriptor='%s' type='%s' transaction_type='%s' channel=%u rank=%u " \
		"device=%x comp_id=%s validity_flags='%s' " \
		"hpa=%llx region=%s region_uuid=%pUb",
		__entry->dpa, show_dpa_flags(__entry->dpa_flags),
		show_event_desc_flags(__entry->descriptor),
		show_mem_event_type(__entry->type),
		show_trans_type(__entry->transaction_type),
		__entry->channel, __entry->rank, __entry->device,
		__print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
		show_valid_flags(__entry->validity_flags),
		__entry->hpa, __get_str(region_name), &__entry->region_uuid
	)
);

/*
 * DRAM Event Record - DER
 *
 * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
 */
/*
 * DRAM Event Record defines many fields the same as the General Media Event
 * Record. Reuse those definitions as appropriate.
 */
#define CXL_DER_VALID_CHANNEL	BIT(0)
#define CXL_DER_VALID_RANK	BIT(1)
#define CXL_DER_VALID_NIBBLE	BIT(2)
#define CXL_DER_VALID_BANK_GROUP	BIT(3)
#define CXL_DER_VALID_BANK	BIT(4)
#define CXL_DER_VALID_ROW	BIT(5)
#define CXL_DER_VALID_COLUMN	BIT(6)
#define CXL_DER_VALID_CORRECTION_MASK	BIT(7)
#define show_dram_valid_flags(flags)	__print_flags(flags, "|", \
	{ CXL_DER_VALID_CHANNEL, "CHANNEL" }, \
	{ CXL_DER_VALID_RANK, "RANK" }, \
	{ CXL_DER_VALID_NIBBLE, "NIBBLE" }, \
	{ CXL_DER_VALID_BANK_GROUP, "BANK GROUP" }, \
	{ CXL_DER_VALID_BANK, "BANK" }, \
	{ CXL_DER_VALID_ROW, "ROW" }, \
	{ CXL_DER_VALID_COLUMN, "COLUMN" }, \
	{ CXL_DER_VALID_CORRECTION_MASK, "CORRECTION MASK" } \
)

TRACE_EVENT(cxl_dram,

	TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
		 struct cxl_region *cxlr, u64 hpa, struct cxl_event_dram *rec),

	TP_ARGS(cxlmd, log, cxlr, hpa, rec),

	TP_STRUCT__entry(
		CXL_EVT_TP_entry
		/* DRAM */
		__field(u64, dpa)
		__field(u8, descriptor)
		__field(u8, type)
		__field(u8, transaction_type)
		__field(u8, channel)
		__field(u16, validity_flags)
		__field(u16, column)	/* Out of order to pack trace record */
		__field(u32, nibble_mask)
		__field(u32, row)
		__array(u8, cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE)
		__field(u64, hpa)
		__field_struct(uuid_t, region_uuid)
		__field(u8, rank)	/* Out of order to pack trace record */
		__field(u8, bank_group)	/* Out of order to pack trace record */
		__field(u8, bank)	/* Out of order to pack trace record */
		__field(u8, dpa_flags)	/* Out of order to pack trace record */
		__string(region_name, cxlr ? dev_name(&cxlr->dev) : "")
	),

	TP_fast_assign(
		CXL_EVT_TP_fast_assign(cxlmd, log, rec->media_hdr.hdr);
		__entry->hdr_uuid = CXL_EVENT_DRAM_UUID;

		/* DRAM */
		__entry->dpa = le64_to_cpu(rec->media_hdr.phys_addr);
		__entry->dpa_flags = __entry->dpa & CXL_DPA_FLAGS_MASK;
		__entry->dpa &= CXL_DPA_MASK;
		__entry->descriptor = rec->media_hdr.descriptor;
		__entry->type = rec->media_hdr.type;
		__entry->transaction_type = rec->media_hdr.transaction_type;
		__entry->validity_flags = get_unaligned_le16(rec->media_hdr.validity_flags);
		__entry->channel = rec->media_hdr.channel;
		__entry->rank = rec->media_hdr.rank;
		__entry->nibble_mask = get_unaligned_le24(rec->nibble_mask);
		__entry->bank_group = rec->bank_group;
		__entry->bank = rec->bank;
		__entry->row = get_unaligned_le24(rec->row);
		__entry->column = get_unaligned_le16(rec->column);
		memcpy(__entry->cor_mask, &rec->correction_mask,
		       CXL_EVENT_DER_CORRECTION_MASK_SIZE);
		__entry->hpa = hpa;
		if (cxlr) {
			__assign_str(region_name);
			uuid_copy(&__entry->region_uuid, &cxlr->params.uuid);
		} else {
			__assign_str(region_name);
			uuid_copy(&__entry->region_uuid, &uuid_null);
		}
	),

	CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' " \
		"transaction_type='%s' channel=%u rank=%u nibble_mask=%x " \
		"bank_group=%u bank=%u row=%u column=%u cor_mask=%s " \
		"validity_flags='%s' " \
		"hpa=%llx region=%s region_uuid=%pUb",
		__entry->dpa, show_dpa_flags(__entry->dpa_flags),
		show_event_desc_flags(__entry->descriptor),
		show_mem_event_type(__entry->type),
		show_trans_type(__entry->transaction_type),
		__entry->channel, __entry->rank, __entry->nibble_mask,
		__entry->bank_group, __entry->bank,
		__entry->row, __entry->column,
		__print_hex(__entry->cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE),
		show_dram_valid_flags(__entry->validity_flags),
		__entry->hpa, __get_str(region_name), &__entry->region_uuid
	)
);

/*
 * Memory Module Event Record - MMER
 *
 * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
 */
#define CXL_MMER_HEALTH_STATUS_CHANGE	0x00
#define CXL_MMER_MEDIA_STATUS_CHANGE	0x01
#define CXL_MMER_LIFE_USED_CHANGE	0x02
#define CXL_MMER_TEMP_CHANGE	0x03
#define CXL_MMER_DATA_PATH_ERROR	0x04
#define CXL_MMER_LSA_ERROR	0x05
#define show_dev_evt_type(type)	__print_symbolic(type, \
	{ CXL_MMER_HEALTH_STATUS_CHANGE, "Health Status Change" }, \
	{ CXL_MMER_MEDIA_STATUS_CHANGE, "Media Status Change" }, \
	{ CXL_MMER_LIFE_USED_CHANGE, "Life Used Change" }, \
	{ CXL_MMER_TEMP_CHANGE, "Temperature Change" }, \
	{ CXL_MMER_DATA_PATH_ERROR, "Data Path Error" }, \
	{ CXL_MMER_LSA_ERROR, "LSA Error" } \
)

/*
 * Device Health Information - DHI
 *
 * CXL rev 3.0 section 8.2.9.8.3.1; Table 8-100
 */
#define CXL_DHI_HS_MAINTENANCE_NEEDED	BIT(0)
#define CXL_DHI_HS_PERFORMANCE_DEGRADED	BIT(1)
#define CXL_DHI_HS_HW_REPLACEMENT_NEEDED	BIT(2)
#define show_health_status_flags(flags)	__print_flags(flags, "|", \
	{ CXL_DHI_HS_MAINTENANCE_NEEDED, "MAINTENANCE_NEEDED" }, \
	{ CXL_DHI_HS_PERFORMANCE_DEGRADED, "PERFORMANCE_DEGRADED" }, \
	{ CXL_DHI_HS_HW_REPLACEMENT_NEEDED, "REPLACEMENT_NEEDED" } \
)

#define CXL_DHI_MS_NORMAL	0x00
#define CXL_DHI_MS_NOT_READY	0x01
#define CXL_DHI_MS_WRITE_PERSISTENCY_LOST	0x02
#define CXL_DHI_MS_ALL_DATA_LOST	0x03
#define CXL_DHI_MS_WRITE_PERSISTENCY_LOSS_EVENT_POWER_LOSS	0x04
#define CXL_DHI_MS_WRITE_PERSISTENCY_LOSS_EVENT_SHUTDOWN	0x05
#define CXL_DHI_MS_WRITE_PERSISTENCY_LOSS_IMMINENT	0x06
#define CXL_DHI_MS_WRITE_ALL_DATA_LOSS_EVENT_POWER_LOSS	0x07
#define CXL_DHI_MS_WRITE_ALL_DATA_LOSS_EVENT_SHUTDOWN	0x08
#define CXL_DHI_MS_WRITE_ALL_DATA_LOSS_IMMINENT	0x09
#define show_media_status(ms)	__print_symbolic(ms, \
	{ CXL_DHI_MS_NORMAL, \
		"Normal" }, \
	{ CXL_DHI_MS_NOT_READY, \
		"Not Ready" }, \
	{ CXL_DHI_MS_WRITE_PERSISTENCY_LOST, \
		"Write Persistency Lost" }, \
	{ CXL_DHI_MS_ALL_DATA_LOST, \
		"All Data Lost" }, \
	{ CXL_DHI_MS_WRITE_PERSISTENCY_LOSS_EVENT_POWER_LOSS, \
		"Write Persistency Loss in the Event of Power Loss" }, \
	{ CXL_DHI_MS_WRITE_PERSISTENCY_LOSS_EVENT_SHUTDOWN, \
		"Write Persistency Loss in Event of Shutdown" }, \
	{ CXL_DHI_MS_WRITE_PERSISTENCY_LOSS_IMMINENT, \
		"Write Persistency Loss Imminent" }, \
	{ CXL_DHI_MS_WRITE_ALL_DATA_LOSS_EVENT_POWER_LOSS, \
		"All Data Loss in Event of Power Loss" }, \
	{ CXL_DHI_MS_WRITE_ALL_DATA_LOSS_EVENT_SHUTDOWN, \
		"All Data Loss in the Event of Shutdown" }, \
	{ CXL_DHI_MS_WRITE_ALL_DATA_LOSS_IMMINENT, \
		"All Data Loss Imminent" } \
)

#define CXL_DHI_AS_NORMAL	0x0
#define CXL_DHI_AS_WARNING	0x1
#define CXL_DHI_AS_CRITICAL	0x2
#define show_two_bit_status(as) __print_symbolic(as, \
	{ CXL_DHI_AS_NORMAL, "Normal" }, \
	{ CXL_DHI_AS_WARNING, "Warning" }, \
	{ CXL_DHI_AS_CRITICAL, "Critical" } \
)
#define show_one_bit_status(as) __print_symbolic(as, \
	{ CXL_DHI_AS_NORMAL, "Normal" }, \
	{ CXL_DHI_AS_WARNING, "Warning" } \
)

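/*
 * Decode the Additional Status byte: Life Used status in bits[1:0],
 * Device Temperature status in bits[3:2], Corrected Volatile Error Count
 * status in bit 4, and Corrected Persistent Error Count status in bit 5.
 */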
#define CXL_DHI_AS_LIFE_USED(as)	(as & 0x3)
#define CXL_DHI_AS_DEV_TEMP(as)	((as & 0xC) >> 2)
#define CXL_DHI_AS_COR_VOL_ERR_CNT(as)	((as & 0x10) >> 4)
#define CXL_DHI_AS_COR_PER_ERR_CNT(as)	((as & 0x20) >> 5)

TRACE_EVENT(cxl_memory_module,

	TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
		 struct cxl_event_mem_module *rec),

	TP_ARGS(cxlmd, log, rec),

	TP_STRUCT__entry(
		CXL_EVT_TP_entry

		/* Memory Module Event */
		__field(u8, event_type)

		/* Device Health Info */
		__field(u8, health_status)
		__field(u8, media_status)
		__field(u8, life_used)
		__field(u32, dirty_shutdown_cnt)
		__field(u32, cor_vol_err_cnt)
		__field(u32, cor_per_err_cnt)
		__field(s16, device_temp)
		__field(u8, add_status)
	),

	TP_fast_assign(
		CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
		__entry->hdr_uuid = CXL_EVENT_MEM_MODULE_UUID;

		/* Memory Module Event */
		__entry->event_type = rec->event_type;

		/* Device Health Info */
		__entry->health_status = rec->info.health_status;
		__entry->media_status = rec->info.media_status;
		__entry->life_used = rec->info.life_used;
		__entry->dirty_shutdown_cnt = get_unaligned_le32(rec->info.dirty_shutdown_cnt);
		__entry->cor_vol_err_cnt = get_unaligned_le32(rec->info.cor_vol_err_cnt);
		__entry->cor_per_err_cnt = get_unaligned_le32(rec->info.cor_per_err_cnt);
		__entry->device_temp = get_unaligned_le16(rec->info.device_temp);
		__entry->add_status = rec->info.add_status;
	),

	CXL_EVT_TP_printk("event_type='%s' health_status='%s' media_status='%s' " \
		"as_life_used=%s as_dev_temp=%s as_cor_vol_err_cnt=%s " \
		"as_cor_per_err_cnt=%s life_used=%u device_temp=%d " \
		"dirty_shutdown_cnt=%u cor_vol_err_cnt=%u cor_per_err_cnt=%u",
		show_dev_evt_type(__entry->event_type),
		show_health_status_flags(__entry->health_status),
		show_media_status(__entry->media_status),
		show_two_bit_status(CXL_DHI_AS_LIFE_USED(__entry->add_status)),
		show_two_bit_status(CXL_DHI_AS_DEV_TEMP(__entry->add_status)),
		show_one_bit_status(CXL_DHI_AS_COR_VOL_ERR_CNT(__entry->add_status)),
		show_one_bit_status(CXL_DHI_AS_COR_PER_ERR_CNT(__entry->add_status)),
		__entry->life_used, __entry->device_temp,
		__entry->dirty_shutdown_cnt, __entry->cor_vol_err_cnt,
		__entry->cor_per_err_cnt
	)
);

#define show_poison_trace_type(type) \
	__print_symbolic(type, \
		{ CXL_POISON_TRACE_LIST, "List" }, \
		{ CXL_POISON_TRACE_INJECT, "Inject" }, \
		{ CXL_POISON_TRACE_CLEAR, "Clear" })

#define __show_poison_source(source) \
	__print_symbolic(source, \
		{ CXL_POISON_SOURCE_UNKNOWN, "Unknown" }, \
		{ CXL_POISON_SOURCE_EXTERNAL, "External" }, \
		{ CXL_POISON_SOURCE_INTERNAL, "Internal" }, \
		{ CXL_POISON_SOURCE_INJECTED, "Injected" }, \
		{ CXL_POISON_SOURCE_VENDOR, "Vendor" })

#define show_poison_source(source) \
	(((source > CXL_POISON_SOURCE_INJECTED) && \
	  (source != CXL_POISON_SOURCE_VENDOR)) ? "Reserved" \
	 : __show_poison_source(source))

#define show_poison_flags(flags) \
	__print_flags(flags, "|", \
		{ CXL_POISON_FLAG_MORE, "More" }, \
		{ CXL_POISON_FLAG_OVERFLOW, "Overflow" }, \
		{ CXL_POISON_FLAG_SCANNING, "Scanning" })

#define __cxl_poison_addr(record) \
	(le64_to_cpu(record->address))
#define cxl_poison_record_dpa(record) \
	(__cxl_poison_addr(record) & CXL_POISON_START_MASK)
#define cxl_poison_record_source(record) \
	(__cxl_poison_addr(record) & CXL_POISON_SOURCE_MASK)
#define cxl_poison_record_dpa_length(record) \
	(le32_to_cpu(record->length) * CXL_POISON_LEN_MULT)
#define cxl_poison_overflow(flags, time) \
	(flags & CXL_POISON_FLAG_OVERFLOW ? le64_to_cpu(time) : 0)

TRACE_EVENT(cxl_poison,

	TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *cxlr,
		 const struct cxl_poison_record *record, u8 flags,
		 __le64 overflow_ts, enum cxl_poison_trace_type trace_type),

	TP_ARGS(cxlmd, cxlr, record, flags, overflow_ts, trace_type),

	TP_STRUCT__entry(
		__string(memdev, dev_name(&cxlmd->dev))
		__string(host, dev_name(cxlmd->dev.parent))
		__field(u64, serial)
		__field(u8, trace_type)
		__string(region, cxlr ? dev_name(&cxlr->dev) : "")
		__field(u64, overflow_ts)
		__field(u64, hpa)
		__field(u64, dpa)
		__field(u32, dpa_length)
		__array(char, uuid, 16)
		__field(u8, source)
		__field(u8, flags)
	),

	TP_fast_assign(
		__assign_str(memdev);
		__assign_str(host);
		__entry->serial = cxlmd->cxlds->serial;
		__entry->overflow_ts = cxl_poison_overflow(flags, overflow_ts);
		__entry->dpa = cxl_poison_record_dpa(record);
		__entry->dpa_length = cxl_poison_record_dpa_length(record);
		__entry->source = cxl_poison_record_source(record);
		__entry->trace_type = trace_type;
		__entry->flags = flags;
		if (cxlr) {
			__assign_str(region);
			memcpy(__entry->uuid, &cxlr->params.uuid, 16);
			__entry->hpa = cxl_dpa_to_hpa(cxlr, cxlmd,
						      __entry->dpa);
		} else {
			__assign_str(region);
			memset(__entry->uuid, 0, 16);
			__entry->hpa = ULLONG_MAX;
		}
	),

	TP_printk("memdev=%s host=%s serial=%lld trace_type=%s region=%s " \
		"region_uuid=%pU hpa=0x%llx dpa=0x%llx dpa_length=0x%x " \
		"source=%s flags=%s overflow_time=%llu",
		__get_str(memdev),
		__get_str(host),
		__entry->serial,
		show_poison_trace_type(__entry->trace_type),
		__get_str(region),
		__entry->uuid,
		__entry->hpa,
		__entry->dpa,
		__entry->dpa_length,
		show_poison_source(__entry->source),
		show_poison_flags(__entry->flags),
		__entry->overflow_ts
	)
);

#endif /* _CXL_EVENTS_H */

#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>