/* ObjFW — mutex.h */
/*
 * Copyright (c) 2008-2021 Jonathan Schleifer <js@nil.im>
 *
 * All rights reserved.
 *
 * This file is part of ObjFW. It may be distributed under the terms of the
 * Q Public License 1.0, which can be found in the file LICENSE.QPL included in
 * the packaging of this file.
 *
 * Alternatively, it may be distributed under the terms of the GNU General
 * Public License, either version 2 or 3, which can be found in the file
 * LICENSE.GPLv2 or LICENSE.GPLv3 respectively included in the packaging of this
 * file.
 */
15 
16 #include "objfw-defs.h"
17 
18 #include <errno.h>
19 
20 #include "platform.h"
21 
22 #if !defined(OF_HAVE_THREADS) || \
23  (!defined(OF_HAVE_PTHREADS) && !defined(OF_WINDOWS) && !defined(OF_AMIGAOS))
24 # error No mutexes available!
25 #endif
26 
27 #import "macros.h"
28 
29 #if defined(OF_HAVE_PTHREADS)
30 # include <pthread.h>
31 typedef pthread_mutex_t of_mutex_t;
32 #elif defined(OF_WINDOWS)
33 # include <windows.h>
34 typedef CRITICAL_SECTION of_mutex_t;
35 #elif defined(OF_AMIGAOS)
36 # include <exec/semaphores.h>
37 typedef struct SignalSemaphore of_mutex_t;
38 #endif
39 
40 #if defined(OF_HAVE_ATOMIC_OPS)
41 # import "atomic.h"
42 typedef volatile int of_spinlock_t;
43 # define OF_SPINCOUNT 10
44 #elif defined(OF_HAVE_PTHREAD_SPINLOCKS)
45 typedef pthread_spinlock_t of_spinlock_t;
46 #else
47 typedef of_mutex_t of_spinlock_t;
48 #endif
49 
50 #ifdef OF_HAVE_SCHED_YIELD
51 # include <sched.h>
52 #endif
53 
54 #if defined(OF_HAVE_RECURSIVE_PTHREAD_MUTEXES) || defined(OF_WINDOWS) || \
55  defined(OF_AMIGAOS)
56 # define of_rmutex_t of_mutex_t
57 #else
58 # import "tlskey.h"
59 typedef struct {
60  of_mutex_t mutex;
61  of_tlskey_t count;
62 } of_rmutex_t;
63 #endif
64 
65 #ifdef __cplusplus
66 extern "C" {
67 #endif
68 extern int of_mutex_new(of_mutex_t *mutex);
69 extern int of_mutex_lock(of_mutex_t *mutex);
70 extern int of_mutex_trylock(of_mutex_t *mutex);
71 extern int of_mutex_unlock(of_mutex_t *mutex);
72 extern int of_mutex_free(of_mutex_t *mutex);
73 extern int of_rmutex_new(of_rmutex_t *rmutex);
74 extern int of_rmutex_lock(of_rmutex_t *rmutex);
75 extern int of_rmutex_trylock(of_rmutex_t *rmutex);
76 extern int of_rmutex_unlock(of_rmutex_t *rmutex);
77 extern int of_rmutex_free(of_rmutex_t *rmutex);
78 #ifdef __cplusplus
79 }
80 #endif
81 
/* Spinlocks are inlined for performance. */
83 
84 static OF_INLINE void
85 of_thread_yield(void)
86 {
87 #if defined(OF_HAVE_SCHED_YIELD)
88  sched_yield();
89 #elif defined(OF_WINDOWS)
90  Sleep(0);
91 #endif
92 }
93 
94 static OF_INLINE int
95 of_spinlock_new(of_spinlock_t *spinlock)
96 {
97 #if defined(OF_HAVE_ATOMIC_OPS)
98  *spinlock = 0;
99  return 0;
100 #elif defined(OF_HAVE_PTHREAD_SPINLOCKS)
101  return pthread_spin_init(spinlock, 0);
102 #else
103  return of_mutex_new(spinlock);
104 #endif
105 }
106 
107 static OF_INLINE int
108 of_spinlock_trylock(of_spinlock_t *spinlock)
109 {
110 #if defined(OF_HAVE_ATOMIC_OPS)
111  if (of_atomic_int_cmpswap(spinlock, 0, 1)) {
112  of_memory_barrier_acquire();
113  return 0;
114  }
115 
116  return EBUSY;
117 #elif defined(OF_HAVE_PTHREAD_SPINLOCKS)
118  return pthread_spin_trylock(spinlock);
119 #else
120  return of_mutex_trylock(spinlock);
121 #endif
122 }
123 
124 static OF_INLINE int
125 of_spinlock_lock(of_spinlock_t *spinlock)
126 {
127 #if defined(OF_HAVE_ATOMIC_OPS)
128  size_t i;
129 
130  for (i = 0; i < OF_SPINCOUNT; i++)
131  if (of_spinlock_trylock(spinlock) == 0)
132  return 0;
133 
134  while (of_spinlock_trylock(spinlock) == EBUSY)
135  of_thread_yield();
136 
137  return 0;
138 #elif defined(OF_HAVE_PTHREAD_SPINLOCKS)
139  return pthread_spin_lock(spinlock);
140 #else
141  return of_mutex_lock(spinlock);
142 #endif
143 }
144 
145 static OF_INLINE int
146 of_spinlock_unlock(of_spinlock_t *spinlock)
147 {
148 #if defined(OF_HAVE_ATOMIC_OPS)
149  bool ret = of_atomic_int_cmpswap(spinlock, 1, 0);
150 
151  of_memory_barrier_release();
152 
153  return (ret ? 0 : EINVAL);
154 #elif defined(OF_HAVE_PTHREAD_SPINLOCKS)
155  return pthread_spin_unlock(spinlock);
156 #else
157  return of_mutex_unlock(spinlock);
158 #endif
159 }
160 
161 static OF_INLINE int
162 of_spinlock_free(of_spinlock_t *spinlock)
163 {
164 #if defined(OF_HAVE_ATOMIC_OPS)
165  return 0;
166 #elif defined(OF_HAVE_PTHREAD_SPINLOCKS)
167  return pthread_spin_destroy(spinlock);
168 #else
169  return of_mutex_free(spinlock);
170 #endif
171 }