
Two instances of this code running at once are expected to behave like
this:

+------------------------------------+------------------------------------+
| Instance 1                         | Instance 2                         |
+------------------------------------+------------------------------------+
| read very_important_count (5)      |                                    |
+------------------------------------+------------------------------------+
| add 1 (6)                          |                                    |
+------------------------------------+------------------------------------+
| write very_important_count (6)     |                                    |
+------------------------------------+------------------------------------+
|                                    | read very_important_count (6)      |
+------------------------------------+------------------------------------+
|                                    | add 1 (7)                          |
+------------------------------------+------------------------------------+
|                                    | write very_important_count (7)     |
+------------------------------------+------------------------------------+

This is what might happen:

+------------------------------------+------------------------------------+
| Instance 1                         | Instance 2                         |
+------------------------------------+------------------------------------+
| read very_important_count (5)      |                                    |
+------------------------------------+------------------------------------+
|                                    | read very_important_count (5)      |
+------------------------------------+------------------------------------+
| add 1 (6)                          |                                    |
+------------------------------------+------------------------------------+
|                                    | add 1 (6)                          |
+------------------------------------+------------------------------------+
| write very_important_count (6)     |                                    |
+------------------------------------+------------------------------------+
|                                    | write very_important_count (6)     |
+------------------------------------+------------------------------------+

Race Conditions and Critical Regions
------------------------------------

This overlapping, or the possibility of it, is called a race condition;
the piece of code containing the concurrency issue is called a critical
region. The usual solution is to ensure that only one thing can be in
the critical region at any time. There are many friendly primitives in
the Linux kernel to help you do this.

Two Main Types of Kernel Locks: Spinlocks and Mutexes
-----------------------------------------------------

The fundamental type is the spinlock (``include/asm/spinlock.h``),
which is a very simple
single-holder lock: if you can't get the spinlock, you keep trying
(spinning) until you can. Spinlocks are very small and fast, and can be
used anywhere.

The second type is a mutex (``include/linux/mutex.h``): like a
spinlock, but you may block holding a mutex. If you can't lock a mutex,
your task will suspend itself, and be woken up when the mutex is
released. This means the CPU can do something else while you are
waiting.

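As a minimal sketch of spinlock usage (the lock and counter names are
invented for illustration, not taken from the original text)::

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(counter_lock);
    static unsigned long counter;

    /* All users of counter take counter_lock, so the increment
       cannot interleave with another CPU's read-modify-write. */
    void counter_inc(void)
    {
            spin_lock(&counter_lock);
            counter++;
            spin_unlock(&counter_lock);
    }

This plain spin_lock() form is only enough when no interrupt handler
ever takes the same lock; the variants for those cases appear below.
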
Locks and Uniprocessor Kernels
------------------------------

For kernels compiled without ``CONFIG_SMP``, and without
``CONFIG_PREEMPT``, spinlocks do not exist at all. This is an excellent
design decision: when no-one else can run at the same time, there is no
reason to have a lock.

Locking Only In User Context
----------------------------

If you have a data structure which is only ever accessed from user
context, then you can use a simple mutex (``include/linux/mutex.h``) to
protect it. Example: ``net/netfilter/nf_sockopt.c`` allows registration
of new setsockopt() and getsockopt() calls, with
nf_register_sockopt(). Registration and de-registration
are only done on module load and unload (and boot time, where there is
no concurrency), and the list of registrations is only used for an
unknown setsockopt() or getsockopt() call. A
mutex is perfect to protect this, especially since the setsockopt and
getsockopt calls may well sleep.

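A sketch of that pattern, with invented names (the real
``nf_sockopt.c`` differs in detail)::

    #include <linux/list.h>
    #include <linux/mutex.h>

    struct my_sockopt {
            struct list_head list;
            /* ... callbacks ... */
    };

    static LIST_HEAD(my_sockopts);
    static DEFINE_MUTEX(my_sockopt_mutex);

    /* User context only (module load/unload), so sleeping is fine. */
    int my_register_sockopt(struct my_sockopt *opt)
    {
            mutex_lock(&my_sockopt_mutex);
            list_add(&opt->list, &my_sockopts);
            mutex_unlock(&my_sockopt_mutex);
            return 0;
    }

    void my_unregister_sockopt(struct my_sockopt *opt)
    {
            mutex_lock(&my_sockopt_mutex);
            list_del(&opt->list);
            mutex_unlock(&my_sockopt_mutex);
    }
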
Locking Between User Context and Softirqs
-----------------------------------------

If a softirq shares data with user context, you have two problems.
Firstly, the current user context can be interrupted by a softirq, and
secondly, the critical region could be entered from another CPU. This
is where spin_lock_bh() (``include/linux/spinlock.h``) is
used. It disables softirqs on that CPU, then grabs the lock.
spin_unlock_bh() does the reverse.

Locking Between User Context and Tasklets
-----------------------------------------

This is exactly the same as above, because tasklets are actually run
from a softirq.

Locking Between User Context and Timers
---------------------------------------

This, too, is exactly the same as above, because timers are actually
run from a softirq. From a locking point of view, tasklets and timers
are identical.

Locking Between Tasklets/Timers
-------------------------------

Sometimes a tasklet or timer might want to share data with another
tasklet or timer. The same tasklet or timer never runs on two CPUs at
once, so no locking is needed against itself; different tasklets or
timers need spin_lock() and spin_unlock().

Locking Between Softirqs
------------------------

Often a softirq might want to share data with itself or a
tasklet/timer.

The Same Softirq
~~~~~~~~~~~~~~~~

The same softirq can run on the other CPUs: you can use a per-CPU array
(see `Per-CPU Data`_) for better performance. If you're
going so far as to use a softirq, you probably care about scalable
performance enough to justify the extra complexity.

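A sketch of such a per-CPU array, with invented names (a softirq
handler does not migrate CPUs mid-run, so each CPU's slot needs no
lock)::

    #include <linux/percpu.h>
    #include <linux/types.h>

    struct my_stats {
            unsigned long packets;
            unsigned long errors;
    };
    static DEFINE_PER_CPU(struct my_stats, softirq_stats);

    /* Called from softirq context: preemption is disabled there,
       so this CPU's slot cannot change under us. */
    static void count_packet(bool bad)
    {
            struct my_stats *s = this_cpu_ptr(&softirq_stats);

            s->packets++;
            if (bad)
                    s->errors++;
    }
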
Locking Between Hard IRQ and Softirqs/Tasklets
----------------------------------------------

If a hardware irq handler shares data with a softirq, you have two
concerns. Firstly, the softirq processing can be interrupted by a
hardware interrupt, and secondly, the critical region could be entered
by a hardware interrupt on another CPU. This is where
spin_lock_irq() is used: it disables interrupts on that
cpu, then grabs the lock, and spin_unlock_irq() does the
reverse. The spin_lock_irqsave() variant saves whether
interrupts were already off in a flags word, so the same code can be
used from contexts where interrupts may or may not be enabled.

Locking Between Two Hard IRQ Handlers
-------------------------------------

It is rare to have to share data between two IRQ handlers, but if you
do, spin_lock_irqsave() should be used: it is
architecture-specific whether all interrupts are disabled inside irq
handlers themselves.

Cheat Sheet For Locking
-----------------------

Pete Zaitcev gives the following summary:

-  If you are in a process context (any syscall) and want to lock other
   process out, use a mutex. You can take a mutex and sleep
   (``copy_from_user()`` or ``kmalloc(x, GFP_KERNEL)``).

-  Otherwise (== data can be touched in an interrupt), use
   spin_lock_irqsave() and
   spin_unlock_irqrestore(), as in the sketch after this
   list.

-  Avoid holding spinlock for more than 5 lines of code and across any
   function call (except accessors like readb()).

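A minimal sketch of the irqsave form (names invented for
illustration)::

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(shared_lock);
    static unsigned long shared_events;

    /* Safe from process, softirq and hard irq context alike:
       the interrupt state is saved and restored around the
       critical region. */
    void note_event(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&shared_lock, flags);
            shared_events++;
            spin_unlock_irqrestore(&shared_lock, flags);
    }
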
Table of Minimum Requirements
-----------------------------

The following table lists the **minimum** locking requirements between
various contexts. In some cases, the same context can only be running on
one CPU at a time, so no locking is required for that context (eg. a
particular thread can only run on one CPU at a time, but if it needs to
share data with another thread, locking is required).

+--------+----------------------------+
| SLIS   | spin_lock_irqsave          |
+--------+----------------------------+
| SLI    | spin_lock_irq              |
+--------+----------------------------+
| SL     | spin_lock                  |
+--------+----------------------------+
| SLBH   | spin_lock_bh               |
+--------+----------------------------+
| MLI    | mutex_lock_interruptible   |
+--------+----------------------------+

Table: Legend for Locking Requirements Table

The trylock Functions
---------------------

There are functions that try to acquire a lock only once and
immediately return a value telling about success or failure to acquire
that lock. They can be used if you need no access to the data protected
with the lock when some other thread is holding the lock.

spin_trylock() does not spin but returns non-zero if it
acquires the spinlock on the first try or 0 if it doesn't. This
function can be used in all contexts like spin_lock(): you
must have disabled the contexts that might interrupt you and acquire
the spin lock.

mutex_trylock() does not suspend your task but returns
non-zero if it could lock the mutex on the first try or 0 if not. This
function cannot be safely used in hardware or software interrupt
contexts despite not sleeping.

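A sketch of opportunistic locking with spin_trylock()
(invented names; the fallback path must make sense without the lock)::

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(stats_lock);
    static unsigned long stats_updates;
    static unsigned long stats_skipped;

    void try_update_stats(void)
    {
            if (spin_trylock(&stats_lock)) {
                    /* Got it on the first try: do the real work. */
                    stats_updates++;
                    spin_unlock(&stats_lock);
            } else {
                    /* Contended: skip rather than spin. This counter
                       is itself racy, acceptable only for debugging. */
                    stats_skipped++;
            }
    }
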
All In User Context
-------------------

For our first example, we assume that all operations are in user
context (ie. from system calls), so we can sleep. This means we can use
a mutex to protect the cache and all the objects within it. Here's the
code::

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/mutex.h>
    #include <asm/errno.h>

    struct object
    {
            struct list_head list;
            int id;
            char name[32];
            int popularity;
    };

    static DEFINE_MUTEX(cache_lock);
    static LIST_HEAD(cache);
    static unsigned int cache_num = 0;
    #define MAX_CACHE_SIZE 10

    /* Must be holding cache_lock */
    static struct object *__cache_find(int id)
    {
            struct object *i;

            list_for_each_entry(i, &cache, list)
                    if (i->id == id) {
                            i->popularity++;
                            return i;
                    }
            return NULL;
    }

    /* Must be holding cache_lock */
    static void __cache_delete(struct object *obj)
    {
            BUG_ON(!obj);
            list_del(&obj->list);
            kfree(obj);
            cache_num--;
    }

    /* Must be holding cache_lock */
    static void __cache_add(struct object *obj)
    {
            list_add(&obj->list, &cache);
            if (++cache_num > MAX_CACHE_SIZE) {
                    struct object *i, *outcast = NULL;
                    list_for_each_entry(i, &cache, list) {
                            if (!outcast || i->popularity < outcast->popularity)
                                    outcast = i;
                    }
                    __cache_delete(outcast);
            }
    }

    int cache_add(int id, const char *name)
    {
            struct object *obj;

            if ((obj = kmalloc(sizeof(*obj), GFP_KERNEL)) == NULL)
                    return -ENOMEM;

            strscpy(obj->name, name, sizeof(obj->name));
            obj->id = id;
            obj->popularity = 0;

            mutex_lock(&cache_lock);
            __cache_add(obj);
            mutex_unlock(&cache_lock);
            return 0;
    }

    void cache_delete(int id)
    {
            mutex_lock(&cache_lock);
            __cache_delete(__cache_find(id));
            mutex_unlock(&cache_lock);
    }

    int cache_find(int id, char *name)
    {
            int ret = -ENOENT;
            struct object *obj;

            mutex_lock(&cache_lock);
            obj = __cache_find(id);
            if (obj) {
                    ret = 0;
                    strcpy(name, obj->name); /* Insecure, but simple! */
            }
            mutex_unlock(&cache_lock);
            return ret;
    }

There is a slight (and common) optimization here: in
cache_add() we set up the fields of the object before
grabbing the lock. This is safe, as no-one else can access it until we
put it in the cache.

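A hypothetical caller would stay in user context, since both calls can
sleep::

    char name[32];

    if (cache_add(42, "fortytwo") == 0 && cache_find(42, name) == 0)
            printk(KERN_DEBUG "cache: found %s\n", name);
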
Accessing From Interrupt Context
--------------------------------

Now consider the case where cache_find() can be called
from interrupt context: either a hardware interrupt or a softirq. An
example would be a timer which deletes objects from the cache.

The change is shown below, in standard patch format: the ``-`` are
lines which are taken away, and the ``+`` are lines which are added::

    --- cache.c.usercontext 2003-12-09 13:58:54.000000000 +1100
    +++ cache.c.interrupt   2003-12-09 14:07:49.000000000 +1100
    @@ -12,7 +12,7 @@
             int popularity;
     };

    -static DEFINE_MUTEX(cache_lock);
    +static DEFINE_SPINLOCK(cache_lock);
     static LIST_HEAD(cache);
     static unsigned int cache_num = 0;
     #define MAX_CACHE_SIZE 10
    @@ -55,6 +55,7 @@
     int cache_add(int id, const char *name)
     {
             struct object *obj;
    +        unsigned long flags;

             if ((obj = kmalloc(sizeof(*obj), GFP_KERNEL)) == NULL)
                     return -ENOMEM;
    @@ -63,30 +64,33 @@
             obj->id = id;
             obj->popularity = 0;

    -        mutex_lock(&cache_lock);
    +        spin_lock_irqsave(&cache_lock, flags);
             __cache_add(obj);
    -        mutex_unlock(&cache_lock);
    +        spin_unlock_irqrestore(&cache_lock, flags);
             return 0;
     }

     void cache_delete(int id)
     {
    -        mutex_lock(&cache_lock);
    +        unsigned long flags;
    +
    +        spin_lock_irqsave(&cache_lock, flags);
             __cache_delete(__cache_find(id));
    -        mutex_unlock(&cache_lock);
    +        spin_unlock_irqrestore(&cache_lock, flags);
     }

     int cache_find(int id, char *name)
     {
             struct object *obj;
             int ret = -ENOENT;
    +        unsigned long flags;

    -        mutex_lock(&cache_lock);
    +        spin_lock_irqsave(&cache_lock, flags);
             obj = __cache_find(id);
             if (obj) {
                     ret = 0;
                     strcpy(name, obj->name); /* Insecure, but simple! */
             }
    -        mutex_unlock(&cache_lock);
    +        spin_unlock_irqrestore(&cache_lock, flags);
             return ret;
     }

Exposing Objects Outside This File
----------------------------------

If our objects contained more information, it might not be sufficient
to copy the information out: other parts of the code might want to keep
pointers to these objects, for example, rather than looking up the
id every time. This produces two problems.

The first problem is that we use the ``cache_lock`` to protect objects:
we'd need to make this non-static so the rest of the code can use it.
This makes locking trickier, as it is no longer all in one place.

The second problem is the lifetime problem: if another structure keeps
a pointer to an object, it presumably expects that pointer to remain
valid. Unfortunately, this is only guaranteed while you hold the lock,
otherwise someone might call cache_delete() and even
worse, add another object, re-using the same address.

As there is only one lock, you can't hold it forever: no-one else would
get any work done.

The solution to this problem is to use a reference count: everyone who
has a pointer to the object increases it when they first get the
object, and drops the reference count when they're finished with it.
Whoever drops the last reference count knows the object is unused, and
can actually delete it. Here is the code::

    --- cache.c.interrupt   2003-12-09 14:25:43.000000000 +1100
    +++ cache.c.refcnt      2003-12-09 14:33:05.000000000 +1100
    @@ -7,6 +7,7 @@
     struct object
     {
             struct list_head list;
    +        unsigned int refcnt;
             int id;
             char name[32];
             int popularity;
    @@ -17,6 +18,35 @@
     static unsigned int cache_num = 0;
     #define MAX_CACHE_SIZE 10

    +static void __object_put(struct object *obj)
    +{
    +        if (--obj->refcnt == 0)
    +                kfree(obj);
    +}
    +
    +static void __object_get(struct object *obj)
    +{
    +        obj->refcnt++;
    +}
    +
    +void object_put(struct object *obj)
    +{
    +        unsigned long flags;
    +
    +        spin_lock_irqsave(&cache_lock, flags);
    +        __object_put(obj);
    +        spin_unlock_irqrestore(&cache_lock, flags);
    +}
    +
    +void object_get(struct object *obj)
    +{
    +        unsigned long flags;
    +
    +        spin_lock_irqsave(&cache_lock, flags);
    +        __object_get(obj);
    +        spin_unlock_irqrestore(&cache_lock, flags);
    +}
    +
     /* Must be holding cache_lock */
    @@ -35,6 +65,7 @@
     {
             BUG_ON(!obj);
    +        /* Remove from list and drop the cache's reference. */
             list_del(&obj->list);
    -        kfree(obj);
    +        __object_put(obj);
             cache_num--;
     }
    @@ -63,6 +94,7 @@
             strscpy(obj->name, name, sizeof(obj->name));
             obj->id = id;
             obj->popularity = 0;
    +        obj->refcnt = 1; /* The cache holds a reference */

             spin_lock_irqsave(&cache_lock, flags);
             __cache_add(obj);
    @@ -79,18 +111,15 @@
             return 0;
     }

    -int cache_find(int id, char *name)
    +struct object *cache_find(int id)
     {
             struct object *obj;
    -        int ret = -ENOENT;
             unsigned long flags;

             spin_lock_irqsave(&cache_lock, flags);
             obj = __cache_find(id);
    -        if (obj) {
    -                ret = 0;
    -                strcpy(name, obj->name);
    -        }
    +        if (obj)
    +                __object_get(obj);
             spin_unlock_irqrestore(&cache_lock, flags);
    -        return ret;
    +        return obj;
     }

Using Atomic Operations For The Reference Count
-----------------------------------------------

In practice, ``atomic_t`` would usually be used for refcnt. There are a
number of atomic operations defined in ``include/asm/atomic.h``: these
are guaranteed to be seen atomically from all CPUs in the system, so no
lock is required. In this case, it is simpler than using spinlocks,
although for anything non-trivial using spinlocks is clearer. The
atomic_inc() and atomic_dec_and_test() are
used instead of the standard increment and decrement operators, and the
lock is no longer used to protect the reference count itself::

    --- cache.c.refcnt  2003-12-09 15:00:35.000000000 +1100
    +++ cache.c.refcnt-atomic   2003-12-11 15:49:42.000000000 +1100
    @@ -7,7 +7,7 @@
     struct object
     {
             struct list_head list;
    -        unsigned int refcnt;
    +        atomic_t refcnt;
             int id;
             char name[32];
             int popularity;
    @@ -18,33 +18,15 @@
     static unsigned int cache_num = 0;
     #define MAX_CACHE_SIZE 10

    -static void __object_put(struct object *obj)
    -{
    -        if (--obj->refcnt == 0)
    -                kfree(obj);
    -}
    -
    -static void __object_get(struct object *obj)
    -{
    -        obj->refcnt++;
    -}
    -
     void object_put(struct object *obj)
     {
    -        unsigned long flags;
    -
    -        spin_lock_irqsave(&cache_lock, flags);
    -        __object_put(obj);
    -        spin_unlock_irqrestore(&cache_lock, flags);
    +        if (atomic_dec_and_test(&obj->refcnt))
    +                kfree(obj);
     }

     void object_get(struct object *obj)
     {
    -        unsigned long flags;
    -
    -        spin_lock_irqsave(&cache_lock, flags);
    -        __object_get(obj);
    -        spin_unlock_irqrestore(&cache_lock, flags);
    +        atomic_inc(&obj->refcnt);
     }

     /* Must be holding cache_lock */
    @@ -65,7 +47,7 @@
             BUG_ON(!obj);
             /* Remove from list and drop the cache's reference. */
             list_del(&obj->list);
    -        __object_put(obj);
    +        object_put(obj);
             cache_num--;
     }

    @@ -94,7 +76,7 @@
             strscpy(obj->name, name, sizeof(obj->name));
             obj->id = id;
             obj->popularity = 0;
    -        obj->refcnt = 1; /* The cache holds a reference */
    +        atomic_set(&obj->refcnt, 1); /* The cache holds a reference */

             spin_lock_irqsave(&cache_lock, flags);
             __cache_add(obj);
    @@ -119,7 +101,7 @@
             spin_lock_irqsave(&cache_lock, flags);
             obj = __cache_find(id);
             if (obj)
    -                __object_get(obj);
    +                object_get(obj);
             spin_unlock_irqrestore(&cache_lock, flags);
             return obj;
     }

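As an aside, modern kernels also provide ``refcount_t``
(``include/linux/refcount.h``), a saturating wrapper around
``atomic_t`` that traps on overflow and underflow. A sketch of the same
get/put pair using it::

    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct object {
            refcount_t refcnt;
            /* ... */
    };

    void object_get(struct object *obj)
    {
            refcount_inc(&obj->refcnt);
    }

    void object_put(struct object *obj)
    {
            /* Returns true only for the final put: free exactly once. */
            if (refcount_dec_and_test(&obj->refcnt))
                    kfree(obj);
    }
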
Protecting The Objects Themselves
---------------------------------

In these examples, we assumed that the objects (apart from the
reference counts) never changed once they are created. If we wanted to
allow the name to change, there are three possibilities:

-  You can make ``cache_lock`` non-static, and tell people to grab that
   lock before changing the name in any object.

-  You can provide a cache_obj_rename() which grabs this
   lock and changes the name for the caller, and tell everyone to use
   that function.

-  You can make the ``cache_lock`` protect only the cache itself, and
   use another lock to protect the name.

Theoretically, you can make the locks as fine-grained as one lock for
every field, for every object. In practice, the most common variants
are:

-  One lock which protects the infrastructure (the ``cache`` list in
   this example) and all the objects. This is what we have done so far.

-  One lock which protects the infrastructure (including the list
   pointers inside the objects), and one lock inside the object which
   protects the rest of that object.

-  Multiple locks to protect the infrastructure (eg. one lock per hash
   chain), possibly with a separate per-object lock.

Here is the "lock-per-object" implementation::

    --- cache.c.refcnt-atomic   2003-12-11 15:50:54.000000000 +1100
    +++ cache.c.perobjectlock   2003-12-11 17:15:03.000000000 +1100
    @@ -6,11 +6,17 @@

     struct object
     {
    +        /* These two protected by cache_lock. */
             struct list_head list;
    +        int popularity;
    +
             atomic_t refcnt;
    +
    +        /* Doesn't change once created. */
             int id;
    +
    +        spinlock_t lock; /* Protects the name */
             char name[32];
    -        int popularity;
     };

     static DEFINE_SPINLOCK(cache_lock);
    @@ -77,6 +84,7 @@
             obj->id = id;
             obj->popularity = 0;
             atomic_set(&obj->refcnt, 1); /* The cache holds a reference */
    +        spin_lock_init(&obj->lock);

             spin_lock_irqsave(&cache_lock, flags);
             __cache_add(obj);

Note that I decide that the popularity count should be protected by the
``cache_lock`` rather than the per-object lock: this is because it
(like the struct list_head inside the object) is logically
part of the infrastructure. This way, I don't need to grab the lock of
every object in __cache_add() when seeking the least
popular.

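For example, changing the name then needs only the per-object lock (a
hypothetical helper, not part of the original listing, assuming the
name is never touched from contexts that would require disabling
interrupts here)::

    static void cache_obj_rename(struct object *obj, const char *name)
    {
            spin_lock(&obj->lock);
            strscpy(obj->name, name, sizeof(obj->name));
            spin_unlock(&obj->lock);
    }
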
Deadlock: Simple and Advanced
-----------------------------

There is a coding bug where a piece of code tries to grab a spinlock
twice: it will spin forever, waiting for the lock to be released
(spinlocks, rwlocks and mutexes are not recursive in Linux). This is
trivial to diagnose: not a
stay-up-five-nights-talk-to-fluffy-code-bunnies kind of problem.

A more complex problem is the so-called 'deadly embrace', involving two
or more locks. Say CPU 1 takes lock A and then needs lock B, while CPU
2 holds lock B and then needs lock A:

+-----------------------+-----------------------+
| CPU 1                 | CPU 2                 |
+-----------------------+-----------------------+
| Grab lock A -> OK     | Grab lock B -> OK     |
+-----------------------+-----------------------+
| Grab lock B -> spin   | Grab lock A -> spin   |
+-----------------------+-----------------------+

Table: Consequences

The two CPUs will spin forever, waiting for the other to give up their
lock. It will look, smell, and feel like a crash.

Preventing Deadlock
-------------------

Textbooks will tell you that if you always lock in the same order, you
will never get this kind of deadlock. Practice will tell you that this
approach doesn't scale: when I create a new lock, I don't understand
enough of the kernel to figure out where in the 5000 lock hierarchy it
will fit.

The best locks are encapsulated: they never get exposed in headers, and
they are never held around calls to non-trivial functions outside the
same file. You can read through this code and see that it will never
deadlock, because it never tries to grab another lock while it has that
one. People using your code don't even need to know you are using a
lock.

Racing Timers: A Kernel Pastime
-------------------------------

Timers can produce their own special problems with races. Consider a
collection of objects (list, hash, etc) where each object has a timer
which is due to destroy it. If you want to destroy the entire
collection (say on module removal), you might do the following::

        /* THIS CODE BAD BAD BAD BAD: IF IT WAS ANY WORSE IT WOULD USE
           HUNGARIAN NOTATION */
        spin_lock_bh(&list_lock);

        while (list) {
                struct foo *next = list->next;
                timer_delete(&list->timer);
                kfree(list);
                list = next;
        }

        spin_unlock_bh(&list_lock);

Sooner or later, this will crash on SMP, because a timer can have just
gone off before the spin_lock_bh(), and it will only get
the lock after we spin_unlock_bh(), and then try to free
the element (which has already been freed!).

This can be avoided by checking the result of
timer_delete(): if it returns 1, the timer has been
deleted. If 0, it means (in this case) that it is currently running, so
we can do::

        retry:
                spin_lock_bh(&list_lock);

                while (list) {
                        struct foo *next = list->next;
                        if (!timer_delete(&list->timer)) {
                                /* Give timer a chance to delete this */
                                spin_unlock_bh(&list_lock);
                                goto retry;
                        }
                        kfree(list);
                        list = next;
                }

                spin_unlock_bh(&list_lock);

Another common problem is deleting timers which restart themselves (by
calling add_timer() at the end of their timer function).
Because this is a fairly common case which is prone to races, you
should use timer_delete_sync()
(``include/linux/timer.h``) to handle this case.

Locking Speed
=============

There are three main things to worry about when considering the speed
of some code which does locking. First is concurrency: how many things
are going to be waiting while somebody else is holding a lock. Second
is the time taken to actually acquire and release an uncontended lock.
Third is using fewer, or smarter locks. I'm assuming that the lock is
used fairly often: otherwise, you wouldn't be concerned about
efficiency.

Acquisition times depend on how much damage the lock operations do to
the pipeline (pipeline stalls) and how likely it is that this CPU was
the last one to grab the lock (ie. is the lock cache-hot for this CPU):
on a machine with more CPUs, this likelihood drops fast. Consider a
700MHz Intel Pentium III: an instruction takes about 0.7ns, an atomic
increment takes about 58ns, a lock which is cache-hot on this CPU takes
160ns, and a cacheline transfer from another CPU takes an additional
170 to 360ns. (These figures are from Paul McKenney's `Linux Journal
RCU article`_.)

These two aims conflict: holding a lock for a short time might be done
by splitting locks into parts (such as in our final per-object-lock
example), but this increases the number of lock acquisitions, and the
results are often slower than having a single lock. This is another
reason to advocate locking simplicity.

Read/Write Lock Variants
------------------------

Both spinlocks and mutexes have read/write variants: ``rwlock_t`` and
struct rw_semaphore. These divide users into two classes:
the readers and the writers. If you are only reading the data, you can
get a read lock, but to write to the data you need the write lock. Many
people can hold a read lock, but a writer must be sole holder.

If your code divides neatly along reader/writer lines (as our cache
code does), and the lock is held by readers for significant lengths of
time, using these locks can help. They are slightly slower than the
normal locks, though, so in practice ``rwlock_t`` is not usually
worthwhile.

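A sketch of the rwlock calls (invented names)::

    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(map_lock);

    void map_lookup(void)
    {
            read_lock(&map_lock);      /* many readers may hold this */
            /* ... read the shared data, but don't modify it ... */
            read_unlock(&map_lock);
    }

    void map_update(void)
    {
            write_lock(&map_lock);     /* sole holder: safe to modify */
            /* ... modify the shared data ... */
            write_unlock(&map_lock);
    }
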
Avoiding Locks: Read Copy Update
--------------------------------

There is a special method of read/write locking called Read Copy
Update. Using RCU, the readers can avoid taking a lock altogether: as
we expect our cache to be read more often than updated (otherwise the
cache is a waste of time), it is a candidate for this optimization.

Inserting an element into a linked list can be made safe against
simultaneous readers by ordering the writes::

        new->next = list->next;
        wmb();
        list->next = new;

The wmb() is a write memory barrier: it ensures that the
first operation (setting the new element's ``next`` pointer) is
complete and will be seen by all CPUs before the second operation
(putting the new element into the list). Fortunately, there is a
function to do this for standard struct list_head lists:
list_add_rcu() (``include/linux/list.h``).

Removing an element from the list is even simpler: we replace the
pointer to the old element with a pointer to its successor, and readers
will either see it, or skip over it::

        list->next = old->next;

There is list_del_rcu() (``include/linux/list.h``) which
does this (the normal version poisons the old object, which we don't
want).

The reader must also be careful: some CPUs can speculatively fetch the
contents of the next element through the ``next`` pointer early, and
don't realize that the pre-fetched contents is wrong when the ``next``
pointer changes underneath them. Once again, there is a
list_for_each_entry_rcu() (``include/linux/list.h``) to
help you.

The writer's remaining problem is knowing when to free the removed
element: it must wait until every pre-existing reader is done with it.
We use call_rcu() to register a callback which will
actually destroy the object once all pre-existing readers are finished.
Alternatively, synchronize_rcu() may be used to block
until all pre-existing readers are finished.

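A writer-side sketch of that rule, using the simpler blocking form
(illustrative; the patch below uses call_rcu() instead)::

    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    void remove_object(struct object *obj)
    {
            spin_lock(&cache_lock);
            list_del_rcu(&obj->list);
            spin_unlock(&cache_lock);

            /* Block until every reader that could have seen obj
               has left its RCU read-side critical section. */
            synchronize_rcu();
            kfree(obj);
    }
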
Here is the RCU version of our cache::

    --- cache.c.perobjectlock   2003-12-11 17:15:03.000000000 +1100
    +++ cache.c.rcupdate    2003-12-11 17:55:14.000000000 +1100
    @@ -1,15 +1,18 @@
     #include <linux/list.h>
     #include <linux/slab.h>
     #include <linux/string.h>
    +#include <linux/rcupdate.h>
     #include <linux/mutex.h>
     #include <asm/errno.h>

     struct object
     {
    -        /* These two protected by cache_lock. */
    +        /* This is protected by RCU */
             struct list_head list;
    +        struct rcu_head rcu;
    +
             int popularity;

             atomic_t refcnt;

             /* Doesn't change once created. */
    @@ -40,7 +43,7 @@
     {
             struct object *i;

    -        list_for_each_entry(i, &cache, list) {
    +        list_for_each_entry_rcu(i, &cache, list) {
                     if (i->id == id) {
                             i->popularity++;
                             return i;
    @@ -49,19 +52,25 @@
             return NULL;
     }

    +/* Final discard done once we know no readers are looking. */
    +static void cache_delete_rcu(struct rcu_head *head)
    +{
    +        object_put(container_of(head, struct object, rcu));
    +}
    +
     /* Must be holding cache_lock */
     static void __cache_delete(struct object *obj)
     {
             BUG_ON(!obj);
             /* Remove from list and drop the cache's reference. */
    -        list_del(&obj->list);
    -        object_put(obj);
    +        list_del_rcu(&obj->list);
             cache_num--;
    +        call_rcu(&obj->rcu, cache_delete_rcu);
     }

     /* Must be holding cache_lock */
     static void __cache_add(struct object *obj)
     {
    -        list_add(&obj->list, &cache);
    +        list_add_rcu(&obj->list, &cache);
             if (++cache_num > MAX_CACHE_SIZE) {
                     struct object *i, *outcast = NULL;
    @@ -104,12 +114,11 @@
     struct object *cache_find(int id)
     {
             struct object *obj;
    -        unsigned long flags;

    -        spin_lock_irqsave(&cache_lock, flags);
    +        rcu_read_lock();
             obj = __cache_find(id);
             if (obj)
                     object_get(obj);
    -        spin_unlock_irqrestore(&cache_lock, flags);
    +        rcu_read_unlock();
             return obj;
     }

Now, because the 'read lock' in RCU is simply disabling preemption, a
caller which always has preemption disabled between calling
cache_find() and object_put() does not
need to actually get and put the reference count: we could expose
__cache_find() by making it non-static, and such
callers could simply call that. The benefit here is that the reference
count is not written to: the object is not altered in any way, which is
much faster on SMP machines due to caching.

Per-CPU Data
------------

Another technique for avoiding locking which is used fairly widely is
to duplicate the information for each CPU.
Of particular use for simple per-cpu counters is the ``local_t`` type,
and the cpu_local_inc() and related functions, which are
more efficient than simple code on some architectures
(``include/asm/local.h``).

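A sketch using a plain per-CPU counter and the this_cpu operations, a
modern relative of the ``local_t`` approach (names invented)::

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, hit_count);

    void note_hit(void)
    {
            /* Increments this CPU's slot: no lock, and no cacheline
               bouncing between CPUs. */
            this_cpu_inc(hit_count);
    }

    unsigned long read_hits(void)
    {
            unsigned long sum = 0;
            int cpu;

            /* Approximate total: slots may change while we sum. */
            for_each_possible_cpu(cpu)
                    sum += per_cpu(hit_count, cpu);
            return sum;
    }
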
Data Which Is Mostly Used By An IRQ Handler
-------------------------------------------

If data is always accessed from within the same irq handler, you don't
need a lock: the kernel already guarantees that the irq handler will
not run simultaneously on multiple CPUs. Manfred Spraul points out that
you can still do this even if the data is very occasionally accessed in
user context or softirqs/tasklets: the irq handler doesn't take a lock,
and all other accesses are done as in the sketch below. The
disable_irq() prevents the irq handler from running (and
waits for it to finish if it's currently running on other CPUs), and
the lock prevents any other accesses happening at the same time.
Naturally, this is slower than just a spin_lock_irq()
call, so it only makes sense if this type of access happens extremely
rarely.

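The sketch (``foo_irq`` and the mutex are illustrative; a mutex rather
than a spinlock is shown because disable_irq() may sleep
while waiting for a running handler)::

    #include <linux/interrupt.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(foo_mutex);

    /* The rare non-IRQ access to data otherwise owned by the
       handler for foo_irq. */
    void rare_access(unsigned int foo_irq)
    {
            mutex_lock(&foo_mutex);   /* keep out other rare users */
            disable_irq(foo_irq);     /* wait for, then block, the handler */
            /* ... touch the shared data ... */
            enable_irq(foo_irq);
            mutex_unlock(&foo_mutex);
    }
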
What Functions Are Safe To Call From Interrupts?
================================================

Many functions in the kernel sleep (ie. call schedule())
directly or indirectly: you can never call them while holding a
spinlock, or with preemption disabled. This also means you need to be
in user context: calling them from an interrupt is illegal.

Some Functions Which Sleep
--------------------------

The most common ones are listed below, but you usually have to read the
code to find out if other calls are safe. If everyone else who calls it
can sleep, you probably need to be able to sleep, too:

-  Accesses to userspace:

   -  copy_from_user()

   -  copy_to_user()

   -  get_user()

   -  put_user()

-  :ref:`kmalloc(GFP_KERNEL) <kmalloc>`

-  mutex_lock_interruptible() and
   mutex_lock()

   There is a mutex_trylock() which does not sleep.

Some Functions Which Don't Sleep
--------------------------------

Some functions are safe to call from any context, or holding almost any
lock:

-  printk()

-  kfree()

-  add_timer() and timer_delete()

Mutex API reference
===================

.. kernel-doc:: include/linux/mutex.h
   :internal:

.. kernel-doc:: kernel/locking/mutex.c
   :export:

Futex API reference
===================

.. kernel-doc:: kernel/futex/core.c
   :internal:

.. kernel-doc:: kernel/futex/futex.h
   :internal:

.. kernel-doc:: kernel/futex/pi.c
   :internal:

.. kernel-doc:: kernel/futex/requeue.c
   :internal:

.. kernel-doc:: kernel/futex/waitwake.c
   :internal:

Further reading
===============

-  ``Documentation/locking/spinlocks.rst``: Linus Torvalds' spinlocking
   tutorial in the kernel sources.

-  Unix Systems for Modern Architectures: Symmetric Multiprocessing and
   Caching for Kernel Programmers.

   Curt Schimmel's very good introduction to kernel level locking (not
   written for Linux, but nearly everything applies). The book is
   expensive, but really worth every penny to understand SMP locking.
   [ISBN: 0201633388]

Glossary
========

bh
  Bottom Half: for historical reasons, functions with '_bh' in their
  names often now refer to any software interrupt, e.g.
  spin_lock_bh() blocks any software interrupt on the
  current CPU. Bottom halves are deprecated, and will eventually be
  replaced by tasklets. Only one bottom
  half will be running at any time.

SMP
  Symmetric Multi-Processor: kernels compiled for multiple-CPU
  machines. (``CONFIG_SMP=y``).

tasklet
  A dynamically-registrable software interrupt, which is guaranteed to
  only run on one CPU at a time.

timer
  A dynamically-registrable software interrupt, which is run at (or
  close to) a given time. When running, it is just like a tasklet (in
  fact, they are called from the ``TIMER_SOFTIRQ``).

UP
  Uni-Processor: Non-SMP. (``CONFIG_SMP=n``).