// SPDX-License-Identifier: GPL-2.0

// Copyright (C) 2025 Google LLC.

//! Rust API for bitmap.
//!
//! C headers: [`include/linux/bitmap.h`](srctree/include/linux/bitmap.h).

use crate::alloc::{AllocError, Flags};
use crate::bindings;
#[cfg(not(CONFIG_RUST_BITMAP_HARDENED))]
use crate::pr_err;
use core::ptr::NonNull;

const BITS_PER_LONG: usize = bindings::BITS_PER_LONG as usize;

/// Represents a C bitmap. Wraps underlying C bitmap API.
///
/// # Invariants
///
/// Must reference a `[c_ulong]` long enough to fit `data.len()` bits.
#[cfg_attr(CONFIG_64BIT, repr(align(8)))]
#[cfg_attr(not(CONFIG_64BIT), repr(align(4)))]
pub struct Bitmap {
    data: [()],
}

impl Bitmap {
    /// Borrows a C bitmap.
    ///
    /// # Safety
    ///
    /// * `ptr` holds a non-null address of an initialized array of `unsigned long`
    ///   that is large enough to hold `nbits` bits.
    /// * the array must not be freed for the lifetime of this [`Bitmap`]
    /// * concurrent access only happens through atomic operations
    pub unsafe fn from_raw<'a>(ptr: *const usize, nbits: usize) -> &'a Bitmap {
        let data: *const [()] = core::ptr::slice_from_raw_parts(ptr.cast(), nbits);
        // INVARIANT: `data` references an initialized array that can hold `nbits` bits.
        // SAFETY:
        // The caller guarantees that `data` (derived from `ptr` and `nbits`)
        // points to a valid, initialized, and appropriately sized memory region
        // that will not be freed for the lifetime `'a`.
        // We are casting `*const [()]` to `*const Bitmap`. `Bitmap` is a
        // dynamically sized type whose only field is `data: [()]`, so it occupies
        // no storage of its own and takes its alignment from the `repr(align)`
        // attribute. `slice_from_raw_parts` encodes the length (the number of
        // bits, not of `unsigned long` elements) into the metadata of the fat
        // pointer, which is what `len()` reports. Therefore, dereferencing this
        // pointer as `&Bitmap` is sound given the caller's guarantees.
        unsafe { &*(data as *const Bitmap) }
    }

    /// Borrows a C bitmap exclusively.
    ///
    /// # Safety
    ///
    /// * `ptr` holds a non-null address of an initialized array of `unsigned long`
    ///   that is large enough to hold `nbits` bits.
    /// * the array must not be freed for the lifetime of this [`Bitmap`]
    /// * no concurrent access may happen.
    pub unsafe fn from_raw_mut<'a>(ptr: *mut usize, nbits: usize) -> &'a mut Bitmap {
        let data: *mut [()] = core::ptr::slice_from_raw_parts_mut(ptr.cast(), nbits);
        // INVARIANT: `data` references an initialized array that can hold `nbits` bits.
        // SAFETY:
        // The caller guarantees that `data` (derived from `ptr` and `nbits`)
        // points to a valid, initialized, and appropriately sized memory region
        // that will not be freed for the lifetime `'a`.
        // Furthermore, the caller guarantees no concurrent access will happen,
        // which upholds the exclusivity requirement for a mutable reference.
        // As in `from_raw`, casting `*mut [()]` to `*mut Bitmap` is sound because
        // `Bitmap` is a dynamically sized type whose layout is compatible with a
        // slice of `()`.
        unsafe { &mut *(data as *mut Bitmap) }
    }

    /// Returns a raw pointer to the backing [`Bitmap`].
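    ///
    /// # Examples
    ///
    /// A minimal sketch; [`BitmapVec`] derefs to [`Bitmap`], and the returned
    /// pointer is suitable for passing to the C bitmap helpers:
    ///
    /// ```
    /// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
    /// use kernel::bitmap::BitmapVec;
    ///
    /// let bitmap = BitmapVec::new(64, GFP_KERNEL)?;
    /// let ptr = bitmap.as_ptr();
    /// assert!(!ptr.is_null());
    /// # Ok::<(), AllocError>(())
    /// ```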
    pub fn as_ptr(&self) -> *const usize {
        core::ptr::from_ref::<Bitmap>(self).cast::<usize>()
    }

    /// Returns a mutable raw pointer to the backing [`Bitmap`].
    pub fn as_mut_ptr(&mut self) -> *mut usize {
        core::ptr::from_mut::<Bitmap>(self).cast::<usize>()
    }

    /// Returns length of this [`Bitmap`].
    #[expect(clippy::len_without_is_empty)]
    pub fn len(&self) -> usize {
        self.data.len()
    }
}

/// Holds either a pointer to array of `unsigned long` or a small bitmap.
#[repr(C)]
union BitmapRepr {
    bitmap: usize,
    ptr: NonNull<usize>,
}

macro_rules! bitmap_assert {
    ($cond:expr, $($arg:tt)+) => {
        #[cfg(CONFIG_RUST_BITMAP_HARDENED)]
        assert!($cond, $($arg)*);
    }
}

macro_rules! bitmap_assert_return {
    ($cond:expr, $($arg:tt)+) => {
        #[cfg(CONFIG_RUST_BITMAP_HARDENED)]
        assert!($cond, $($arg)*);

        #[cfg(not(CONFIG_RUST_BITMAP_HARDENED))]
        if !($cond) {
            pr_err!($($arg)*);
            return
        }
    }
}

/// Represents an owned bitmap.
///
/// Wraps underlying C bitmap API. See [`Bitmap`] for available
/// methods.
///
/// # Examples
///
/// Basic usage
///
/// ```
/// use kernel::alloc::flags::GFP_KERNEL;
/// use kernel::bitmap::BitmapVec;
///
/// let mut b = BitmapVec::new(16, GFP_KERNEL)?;
///
/// assert_eq!(16, b.len());
/// for i in 0..16 {
///     if i % 4 == 0 {
///         b.set_bit(i);
///     }
/// }
/// assert_eq!(Some(0), b.next_bit(0));
/// assert_eq!(Some(1), b.next_zero_bit(0));
/// assert_eq!(Some(4), b.next_bit(1));
/// assert_eq!(Some(5), b.next_zero_bit(4));
/// assert_eq!(Some(12), b.last_bit());
/// # Ok::<(), Error>(())
/// ```
///
/// # Invariants
///
/// * `nbits` is `<= i32::MAX` and never changes.
/// * if `nbits <= bindings::BITS_PER_LONG`, then `repr` is a `usize`.
/// * otherwise, `repr` holds a non-null pointer to an initialized
///   array of `unsigned long` that is large enough to hold `nbits` bits.
pub struct BitmapVec {
    /// Representation of bitmap.
    repr: BitmapRepr,
    /// Length of this bitmap. Must be `<= i32::MAX`.
    nbits: usize,
}

impl core::ops::Deref for BitmapVec {
    type Target = Bitmap;

    fn deref(&self) -> &Bitmap {
        let ptr = if self.nbits <= BITS_PER_LONG {
            // SAFETY: Bitmap is represented inline.
            #[allow(unused_unsafe, reason = "Safe since Rust 1.92.0")]
            unsafe { core::ptr::addr_of!(self.repr.bitmap) }
        } else {
            // SAFETY: Bitmap is represented as array of `unsigned long`.
            unsafe { self.repr.ptr.as_ptr() }
        };

        // SAFETY: We got the right pointer and invariants of [`Bitmap`] hold.
        // An inline bitmap is treated like an array with single element.
        unsafe { Bitmap::from_raw(ptr, self.nbits) }
    }
}

impl core::ops::DerefMut for BitmapVec {
    fn deref_mut(&mut self) -> &mut Bitmap {
        let ptr = if self.nbits <= BITS_PER_LONG {
            // SAFETY: Bitmap is represented inline.
            #[allow(unused_unsafe, reason = "Safe since Rust 1.92.0")]
            unsafe { core::ptr::addr_of_mut!(self.repr.bitmap) }
        } else {
            // SAFETY: Bitmap is represented as array of `unsigned long`.
            unsafe { self.repr.ptr.as_ptr() }
        };

        // SAFETY: We got the right pointer and invariants of [`BitmapVec`] hold.
        // An inline bitmap is treated like an array with single element.
        unsafe { Bitmap::from_raw_mut(ptr, self.nbits) }
    }
}

/// Enable ownership transfer to other threads.
///
/// SAFETY: We own the underlying bitmap representation.
unsafe impl Send for BitmapVec {}

/// Enable unsynchronized concurrent access to [`BitmapVec`] through shared references.
///
/// SAFETY: `deref()` will return a reference to a [`Bitmap`]. Its methods
/// that take immutable references are either atomic or read-only.
unsafe impl Sync for BitmapVec {}

impl Drop for BitmapVec {
    fn drop(&mut self) {
        if self.nbits <= BITS_PER_LONG {
            return;
        }
        // SAFETY: `self.repr.ptr` was returned by the C `bitmap_zalloc`.
        //
        // INVARIANT: there is no other use of `self.repr.ptr` after this
        // call and the value is being dropped so the broken invariant is
        // not observable on function exit.
        unsafe { bindings::bitmap_free(self.repr.ptr.as_ptr()) };
    }
}

impl BitmapVec {
    /// Constructs a new [`BitmapVec`].
    ///
    /// Fails with [`AllocError`] when the [`BitmapVec`] could not be allocated. This
    /// includes the case when `nbits` is greater than `i32::MAX`.
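    ///
    /// # Examples
    ///
    /// A minimal sketch of both the success and the failure case:
    ///
    /// ```
    /// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
    /// use kernel::bitmap::BitmapVec;
    ///
    /// let b = BitmapVec::new(128, GFP_KERNEL)?;
    /// assert_eq!(128, b.len());
    ///
    /// // Requesting more than `i32::MAX` bits fails.
    /// assert!(BitmapVec::new(1 << 31, GFP_KERNEL).is_err());
    /// # Ok::<(), AllocError>(())
    /// ```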
    #[inline]
    pub fn new(nbits: usize, flags: Flags) -> Result<Self, AllocError> {
        if nbits <= BITS_PER_LONG {
            return Ok(BitmapVec {
                repr: BitmapRepr { bitmap: 0 },
                nbits,
            });
        }
        if nbits > i32::MAX.try_into().unwrap() {
            return Err(AllocError);
        }
        let nbits_u32 = u32::try_from(nbits).unwrap();
        // SAFETY: `BITS_PER_LONG < nbits` and `nbits <= i32::MAX`.
        let ptr = unsafe { bindings::bitmap_zalloc(nbits_u32, flags.as_raw()) };
        let ptr = NonNull::new(ptr).ok_or(AllocError)?;
        // INVARIANT: `ptr` returned by C `bitmap_zalloc` and `nbits` checked.
        Ok(BitmapVec {
            repr: BitmapRepr { ptr },
            nbits,
        })
    }

    /// Returns length of this [`Bitmap`].
    #[allow(clippy::len_without_is_empty)]
    #[inline]
    pub fn len(&self) -> usize {
        self.nbits
    }

    /// Fills this `Bitmap` with random bits.
    #[cfg(CONFIG_FIND_BIT_BENCHMARK_RUST)]
    pub fn fill_random(&mut self) {
        // SAFETY: `self.as_mut_ptr` points to either an array of the
        // appropriate length or one usize.
        unsafe {
            bindings::get_random_bytes(
                self.as_mut_ptr().cast::<ffi::c_void>(),
                usize::div_ceil(self.nbits, bindings::BITS_PER_LONG as usize)
                    * bindings::BITS_PER_LONG as usize
                    / 8,
            );
        }
    }
}

impl Bitmap {
    /// Set bit with index `index`.
    ///
    /// ATTENTION: `set_bit` is non-atomic, which differs from the naming
    /// convention in C code. The corresponding C function is `__set_bit`.
    ///
    /// If CONFIG_RUST_BITMAP_HARDENED is not enabled and `index` is greater than
    /// or equal to `self.len()`, does nothing.
    ///
    /// # Panics
    ///
    /// Panics if CONFIG_RUST_BITMAP_HARDENED is enabled and `index` is greater than
    /// or equal to `self.len()`.
    #[inline]
    pub fn set_bit(&mut self, index: usize) {
        bitmap_assert_return!(
            index < self.len(),
            "Bit `index` must be < {}, was {}",
            self.len(),
            index
        );
        // SAFETY: Bit `index` is within bounds.
        unsafe { bindings::__set_bit(index, self.as_mut_ptr()) };
    }

    /// Set bit with index `index`, atomically.
    ///
    /// This is a relaxed atomic operation (no implied memory barriers).
    ///
    /// ATTENTION: The naming convention differs from C, where the corresponding
    /// function is called `set_bit`.
    ///
    /// If CONFIG_RUST_BITMAP_HARDENED is not enabled and `index` is greater than
    /// or equal to `self.len()`, does nothing.
    ///
    /// # Panics
    ///
    /// Panics if CONFIG_RUST_BITMAP_HARDENED is enabled and `index` is greater than
    /// or equal to `self.len()`.
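    ///
    /// # Examples
    ///
    /// A minimal single-threaded sketch of the atomic variants, which only
    /// need a shared reference:
    ///
    /// ```
    /// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
    /// use kernel::bitmap::BitmapVec;
    ///
    /// let b = BitmapVec::new(8, GFP_KERNEL)?;
    ///
    /// b.set_bit_atomic(3);
    /// assert_eq!(Some(3), b.next_bit(0));
    ///
    /// b.clear_bit_atomic(3);
    /// assert_eq!(None, b.next_bit(0));
    /// # Ok::<(), AllocError>(())
    /// ```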
    #[inline]
    pub fn set_bit_atomic(&self, index: usize) {
        bitmap_assert_return!(
            index < self.len(),
            "Bit `index` must be < {}, was {}",
            self.len(),
            index
        );
        // SAFETY: `index` is within bounds and the caller has ensured that
        // there is no mix of non-atomic and atomic operations.
        unsafe { bindings::set_bit(index, self.as_ptr().cast_mut()) };
    }

    /// Clear `index` bit.
    ///
    /// ATTENTION: `clear_bit` is non-atomic, which differs from the naming
    /// convention in C code. The corresponding C function is `__clear_bit`.
    ///
    /// If CONFIG_RUST_BITMAP_HARDENED is not enabled and `index` is greater than
    /// or equal to `self.len()`, does nothing.
    ///
    /// # Panics
    ///
    /// Panics if CONFIG_RUST_BITMAP_HARDENED is enabled and `index` is greater than
    /// or equal to `self.len()`.
    #[inline]
    pub fn clear_bit(&mut self, index: usize) {
        bitmap_assert_return!(
            index < self.len(),
            "Bit `index` must be < {}, was {}",
            self.len(),
            index
        );
        // SAFETY: `index` is within bounds.
        unsafe { bindings::__clear_bit(index, self.as_mut_ptr()) };
    }

    /// Clear `index` bit, atomically.
    ///
    /// This is a relaxed atomic operation (no implied memory barriers).
    ///
    /// ATTENTION: The naming convention differs from C, where the corresponding
    /// function is called `clear_bit`.
    ///
    /// If CONFIG_RUST_BITMAP_HARDENED is not enabled and `index` is greater than
    /// or equal to `self.len()`, does nothing.
    ///
    /// # Panics
    ///
    /// Panics if CONFIG_RUST_BITMAP_HARDENED is enabled and `index` is greater than
    /// or equal to `self.len()`.
    #[inline]
    pub fn clear_bit_atomic(&self, index: usize) {
        bitmap_assert_return!(
            index < self.len(),
            "Bit `index` must be < {}, was {}",
            self.len(),
            index
        );
        // SAFETY: `index` is within bounds and the caller has ensured that
        // there is no mix of non-atomic and atomic operations.
        unsafe { bindings::clear_bit(index, self.as_ptr().cast_mut()) };
    }

    /// Copy `src` into this [`Bitmap`] and set any remaining bits to zero.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
    /// use kernel::bitmap::BitmapVec;
    ///
    /// let mut long_bitmap = BitmapVec::new(256, GFP_KERNEL)?;
    ///
    /// assert_eq!(None, long_bitmap.last_bit());
    ///
    /// let mut short_bitmap = BitmapVec::new(16, GFP_KERNEL)?;
    ///
    /// short_bitmap.set_bit(7);
    /// long_bitmap.copy_and_extend(&short_bitmap);
    /// assert_eq!(Some(7), long_bitmap.last_bit());
    ///
    /// # Ok::<(), AllocError>(())
    /// ```
    #[inline]
    pub fn copy_and_extend(&mut self, src: &Bitmap) {
        let len = core::cmp::min(src.len(), self.len());
        // SAFETY: access to `self` and `src` is within bounds.
        unsafe {
            bindings::bitmap_copy_and_extend(
                self.as_mut_ptr(),
                src.as_ptr(),
                len as u32,
                self.len() as u32,
            )
        };
    }

    /// Finds last set bit.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
    /// use kernel::bitmap::BitmapVec;
    ///
    /// let bitmap = BitmapVec::new(64, GFP_KERNEL)?;
    ///
    /// match bitmap.last_bit() {
    ///     Some(idx) => {
    ///         pr_info!("The last bit has index {idx}.\n");
    ///     }
    ///     None => {
    ///         pr_info!("All bits in this bitmap are 0.\n");
    ///     }
    /// }
    /// # Ok::<(), AllocError>(())
    /// ```
    #[inline]
    pub fn last_bit(&self) -> Option<usize> {
        // SAFETY: `_find_last_bit` access is within bounds due to invariant.
        let index = unsafe { bindings::_find_last_bit(self.as_ptr(), self.len()) };
        if index >= self.len() {
            None
        } else {
            Some(index)
        }
    }

    /// Finds next set bit, starting from `start`.
    ///
    /// Returns `None` if `start` is greater than or equal to `self.len()`.
    #[inline]
    pub fn next_bit(&self, start: usize) -> Option<usize> {
        bitmap_assert!(
            start < self.len(),
            "`start` must be < {}, was {}",
            self.len(),
            start
        );
        // SAFETY: `_find_next_bit` tolerates out-of-bounds arguments and returns a
        // value larger than or equal to `self.len()` in that case.
        let index = unsafe { bindings::_find_next_bit(self.as_ptr(), self.len(), start) };
        if index >= self.len() {
            None
        } else {
            Some(index)
        }
    }

    /// Finds next zero bit, starting from `start`.
    ///
    /// Returns `None` if `start` is greater than or equal to `self.len()`.
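    ///
    /// # Examples
    ///
    /// A minimal sketch of a common pattern: scan for a free slot and claim it.
    ///
    /// ```
    /// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
    /// use kernel::bitmap::BitmapVec;
    ///
    /// let mut slots = BitmapVec::new(8, GFP_KERNEL)?;
    ///
    /// slots.set_bit(0);
    /// slots.set_bit(1);
    ///
    /// // The first free slot is 2; claim it.
    /// let free = slots.next_zero_bit(0);
    /// assert_eq!(Some(2), free);
    /// if let Some(idx) = free {
    ///     slots.set_bit(idx);
    /// }
    /// assert_eq!(Some(3), slots.next_zero_bit(0));
    /// # Ok::<(), AllocError>(())
    /// ```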
    #[inline]
    pub fn next_zero_bit(&self, start: usize) -> Option<usize> {
        bitmap_assert!(
            start < self.len(),
            "`start` must be < {}, was {}",
            self.len(),
            start
        );
        // SAFETY: `_find_next_zero_bit` tolerates out-of-bounds arguments and returns a
        // value larger than or equal to `self.len()` in that case.
        let index = unsafe { bindings::_find_next_zero_bit(self.as_ptr(), self.len(), start) };
        if index >= self.len() {
            None
        } else {
            Some(index)
        }
    }
}

use macros::kunit_tests;

#[kunit_tests(rust_kernel_bitmap)]
mod tests {
    use super::*;
    use kernel::alloc::flags::GFP_KERNEL;
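
    // Illustrative sketch: when the destination is shorter than the source,
    // `copy_and_extend` truncates the copy to the destination length.
    #[test]
    fn bitmap_copy_and_truncate() -> Result<(), AllocError> {
        let mut long_bitmap = BitmapVec::new(256, GFP_KERNEL)?;

        long_bitmap.set_bit(3);
        long_bitmap.set_bit(200);

        let mut short_bitmap = BitmapVec::new(32, GFP_KERNEL)?;

        short_bitmap.set_bit(17);

        short_bitmap.copy_and_extend(&long_bitmap);

        // Bit 17 was overwritten and bit 200 is beyond the destination length,
        // so only bit 3 remains set.
        assert_eq!(Some(3), short_bitmap.next_bit(0));
        assert_eq!(Some(3), short_bitmap.last_bit());
        Ok(())
    }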

    #[test]
    fn bitmap_borrow() {
        let fake_bitmap: [usize; 2] = [0, 0];
        // SAFETY: `fake_bitmap` is an array of expected length.
        let b = unsafe { Bitmap::from_raw(fake_bitmap.as_ptr(), 2 * BITS_PER_LONG) };
        assert_eq!(2 * BITS_PER_LONG, b.len());
        assert_eq!(None, b.next_bit(0));
    }

    #[test]
    fn bitmap_copy() {
        let fake_bitmap: usize = 0xFF;
        // SAFETY: `fake_bitmap` can be used as a one-element array of expected length.
        let b = unsafe { Bitmap::from_raw(core::ptr::addr_of!(fake_bitmap), 8) };
        assert_eq!(8, b.len());
        assert_eq!(None, b.next_zero_bit(0));
    }

    #[test]
    fn bitmap_vec_new() -> Result<(), AllocError> {
        let b = BitmapVec::new(0, GFP_KERNEL)?;
        assert_eq!(0, b.len());

        let b = BitmapVec::new(3, GFP_KERNEL)?;
        assert_eq!(3, b.len());

        let b = BitmapVec::new(1024, GFP_KERNEL)?;
        assert_eq!(1024, b.len());

        // Requesting too large values results in [`AllocError`].
        let res = BitmapVec::new(1 << 31, GFP_KERNEL);
        assert!(res.is_err());
        Ok(())
    }

    #[test]
    fn bitmap_set_clear_find() -> Result<(), AllocError> {
        let mut b = BitmapVec::new(128, GFP_KERNEL)?;

        // Zero-initialized
        assert_eq!(None, b.next_bit(0));
        assert_eq!(Some(0), b.next_zero_bit(0));
        assert_eq!(None, b.last_bit());

        b.set_bit(17);

        assert_eq!(Some(17), b.next_bit(0));
        assert_eq!(Some(17), b.next_bit(17));
        assert_eq!(None, b.next_bit(18));
        assert_eq!(Some(17), b.last_bit());

        b.set_bit(107);

        assert_eq!(Some(17), b.next_bit(0));
        assert_eq!(Some(17), b.next_bit(17));
        assert_eq!(Some(107), b.next_bit(18));
        assert_eq!(Some(107), b.last_bit());

        b.clear_bit(17);

        assert_eq!(Some(107), b.next_bit(0));
        assert_eq!(Some(107), b.last_bit());
        Ok(())
    }

    #[test]
    fn owned_bitmap_out_of_bounds() -> Result<(), AllocError> {
        // TODO: Kunit #[test]s do not support `cfg` yet,
        // so we add it here in the body.
        #[cfg(not(CONFIG_RUST_BITMAP_HARDENED))]
        {
            let mut b = BitmapVec::new(128, GFP_KERNEL)?;
            b.set_bit(2048);
            b.set_bit_atomic(2048);
            b.clear_bit(2048);
            b.clear_bit_atomic(2048);
            assert_eq!(None, b.next_bit(2048));
            assert_eq!(None, b.next_zero_bit(2048));
            assert_eq!(None, b.last_bit());
        }
        Ok(())
    }

    // TODO: uncomment once kunit supports [should_panic] and `cfg`.
    // #[cfg(CONFIG_RUST_BITMAP_HARDENED)]
    // #[test]
    // #[should_panic]
    // fn owned_bitmap_out_of_bounds() -> Result<(), AllocError> {
    //     let mut b = BitmapVec::new(128, GFP_KERNEL)?;
    //
    //     b.set_bit(2048);
    // }

    #[test]
    fn bitmap_copy_and_extend() -> Result<(), AllocError> {
        let mut long_bitmap = BitmapVec::new(256, GFP_KERNEL)?;

        long_bitmap.set_bit(3);
        long_bitmap.set_bit(200);

        let mut short_bitmap = BitmapVec::new(32, GFP_KERNEL)?;

        short_bitmap.set_bit(17);

        long_bitmap.copy_and_extend(&short_bitmap);

        // Previous bits have been cleared.
        assert_eq!(Some(17), long_bitmap.next_bit(0));
        assert_eq!(Some(17), long_bitmap.last_bit());
        Ok(())
    }
}