/* elide.h: Generic lock elision support.
   Copyright (C) 2014-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#ifndef ELIDE_H
#define ELIDE_H 1

#include <hle.h>
#include <elision-conf.h>
#include <atomic.h>


/* Adapt elision with ADAPT_COUNT and STATUS and decide retries.  */

static inline bool
elision_adapt (signed char *adapt_count, unsigned int status)
{
  if (status & _XABORT_RETRY)
    return false;
  if ((status & _XABORT_EXPLICIT)
      && _XABORT_CODE (status) == _ABORT_LOCK_BUSY)
    {
      /* Right now we skip here.  Better would be to wait a bit
	 and retry.  This likely needs some spinning.  Be careful
	 to avoid writing the lock.
	 Using relaxed MO and separate atomic accesses is sufficient because
	 adapt_count is just a hint.  */
      if (atomic_load_relaxed (adapt_count) != __elision_aconf.skip_lock_busy)
	atomic_store_relaxed (adapt_count, __elision_aconf.skip_lock_busy);
    }
  /* Internal abort.  There is no chance for retry.
     Use the normal locking and next time use the lock.
     Be careful to avoid writing to the lock.  See above for MO.  */
  else if (atomic_load_relaxed (adapt_count)
	   != __elision_aconf.skip_lock_internal_abort)
    atomic_store_relaxed (adapt_count,
			  __elision_aconf.skip_lock_internal_abort);
  return true;
}

/* IS_LOCK_FREE must be executed inside the transaction.  */

/* Returns true if the lock defined by IS_LOCK_FREE was elided.
   ADAPT_COUNT is a per-lock state variable; it must be accessed atomically
   to avoid data races but is just a hint, so using relaxed MO and separate
   atomic loads and stores instead of atomic read-modify-write operations is
   sufficient.  */

#define ELIDE_LOCK(adapt_count, is_lock_free)			\
  ({								\
    int ret = 0;						\
								\
    if (atomic_load_relaxed (&(adapt_count)) <= 0)		\
      {								\
	for (int i = __elision_aconf.retry_try_xbegin; i > 0; i--) \
	  {							\
	    unsigned int status;				\
	    if ((status = _xbegin ()) == _XBEGIN_STARTED)	\
	      {							\
		if (is_lock_free)				\
		  {						\
		    ret = 1;					\
		    break;					\
		  }						\
		_xabort (_ABORT_LOCK_BUSY);			\
	      }							\
	    if (!elision_adapt (&(adapt_count), status))	\
	      break;						\
	  }							\
      }								\
    else							\
      atomic_store_relaxed (&(adapt_count),			\
			    atomic_load_relaxed (&(adapt_count)) - 1); \
    ret;							\
  })
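
/* Illustrative usage sketch, not part of this header's interface: a lock
   implementation typically tries ELIDE_LOCK first and falls back to its
   normal lock path when elision was not attempted or did not succeed.  The
   type and names below (my_lock_t, owner, elision, my_real_lock_acquire)
   are hypothetical placeholders, not glibc identifiers:

     int
     my_lock (my_lock_t *lock)
     {
       if (ELIDE_LOCK (lock->elision, lock->owner == 0))
	 return 0;	// elided: the lock word was never written
       return my_real_lock_acquire (lock);	// normal, non-elided path
     }
*/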
/* Returns true if the lock defined by IS_LOCK_FREE was try-elided.
   ADAPT_COUNT is a per-lock state variable.  */

#define ELIDE_TRYLOCK(adapt_count, is_lock_free, write) ({	\
  int ret = 0;							\
  if (__elision_aconf.retry_try_xbegin > 0)			\
    {								\
      if (write)						\
	_xabort (_ABORT_NESTED_TRYLOCK);			\
      ret = ELIDE_LOCK (adapt_count, is_lock_free);		\
    }								\
  ret;								\
})
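
/* Illustrative trylock sketch, not part of this header's interface: when
   WRITE is nonzero the _xabort above aborts an enclosing transaction with
   _ABORT_NESTED_TRYLOCK; executed outside of a transaction, xabort has no
   effect and elision is still attempted.  The names below are hypothetical
   placeholders:

     int
     my_trylock (my_lock_t *lock, int want_write)
     {
       if (ELIDE_TRYLOCK (lock->elision, lock->owner == 0, want_write))
	 return 0;	// elided
       return my_real_trylock (lock);	// normal trylock path
     }
*/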
/* Returns true if the lock defined by IS_LOCK_FREE was elided.  The call
   to _xend crashes if the application incorrectly tries to unlock a
   lock which has not been locked.  */

#define ELIDE_UNLOCK(is_lock_free)		\
  ({						\
    int ret = 0;				\
    if (is_lock_free)				\
      {						\
	_xend ();				\
	ret = 1;				\
      }						\
    ret;					\
  })
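
/* Illustrative unlock sketch, not part of this header's interface: the
   IS_LOCK_FREE argument is expected to be true exactly when the lock was
   elided (the lock word was never written), so ELIDE_UNLOCK either commits
   the transaction with _xend or returns 0 and lets the caller run its
   normal unlock path.  The names below are hypothetical placeholders:

     int
     my_unlock (my_lock_t *lock)
     {
       if (ELIDE_UNLOCK (lock->owner == 0))
	 return 0;	// transaction committed
       return my_real_unlock (lock);	// normal, non-elided path
     }
*/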

#endif