
wanghongliang/glibc

forked from src-openEuler/glibc 
0002-add-header-files-for-libphtread_2_17_so.patch 92.48 KB
From 76a50749f7af5935ba3739e815aa6a16ae4440d1 Mon Sep 17 00:00:00 2001
From: Ulrich Drepper <drepper@redhat.com>
Date: Tue Nov 26 22:50:54 2002 +0000
Subject: [PATCH 2/9] build extra libpthreadcond so
Add header files that reference glibc-2.17 so the extra library builds
successfully. They are drawn from upstream submissions including, but
not limited to, the following commits:
76a50749f7a
d5efd131d4e
eab380d8ec9
---
nptl_2_17/bits/pthreadtypes_2_17.h | 127 +++
nptl_2_17/bits/thread-shared-types_2_17.h | 186 ++++
nptl_2_17/internaltypes_2_17.h | 179 ++++
nptl_2_17/kernel-features_2_17.h | 162 +++
nptl_2_17/pthreadP_2_17.h | 714 +++++++++++++
nptl_2_17/pthread_2_17.h | 1175 +++++++++++++++++++++
6 files changed, 2543 insertions(+)
create mode 100644 nptl_2_17/bits/pthreadtypes_2_17.h
create mode 100644 nptl_2_17/bits/thread-shared-types_2_17.h
create mode 100644 nptl_2_17/internaltypes_2_17.h
create mode 100644 nptl_2_17/kernel-features_2_17.h
create mode 100644 nptl_2_17/pthreadP_2_17.h
create mode 100644 nptl_2_17/pthread_2_17.h
diff --git a/nptl_2_17/bits/pthreadtypes_2_17.h b/nptl_2_17/bits/pthreadtypes_2_17.h
new file mode 100644
index 00000000..da5521c1
--- /dev/null
+++ b/nptl_2_17/bits/pthreadtypes_2_17.h
@@ -0,0 +1,127 @@
+/* Declaration of common pthread types for all architectures.
+ Copyright (C) 2017-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _BITS_PTHREADTYPES_COMMON_H
+# define _BITS_PTHREADTYPES_COMMON_H 1
+
+/* For internal mutex and condition variable definitions. */
+#include "thread-shared-types_2_17.h"
+
+/* Thread identifiers. The structure of the attribute type is not
+ exposed on purpose. */
+typedef unsigned long int pthread_t;
+
+
+/* Data structures for mutex handling. The structure of the attribute
+ type is not exposed on purpose. */
+typedef union
+{
+ char __size[__SIZEOF_PTHREAD_MUTEXATTR_T];
+ int __align;
+} pthread_mutexattr_t;
+
+
+/* Data structure for condition variable handling. The structure of
+ the attribute type is not exposed on purpose. */
+typedef union
+{
+ char __size[__SIZEOF_PTHREAD_CONDATTR_T];
+ int __align;
+} pthread_condattr_t;
+
+
+/* Keys for thread-specific data */
+typedef unsigned int pthread_key_t;
+
+
+/* Once-only execution */
+typedef int __ONCE_ALIGNMENT pthread_once_t;
+
+
+union pthread_attr_t
+{
+ char __size[__SIZEOF_PTHREAD_ATTR_T];
+ long int __align;
+};
+#ifndef __have_pthread_attr_t
+typedef union pthread_attr_t pthread_attr_t;
+# define __have_pthread_attr_t 1
+#endif
+
+
+typedef union
+{
+ struct __pthread_mutex_s __data;
+ char __size[__SIZEOF_PTHREAD_MUTEX_T];
+ long int __align;
+} pthread_mutex_t;
+
+
+typedef union
+{
+  struct
+  {
+    int __lock;
+    unsigned int __futex;
+    __extension__ unsigned long long int __total_seq;
+    __extension__ unsigned long long int __wakeup_seq;
+    __extension__ unsigned long long int __woken_seq;
+    void *__mutex;
+    unsigned int __nwaiters;
+    unsigned int __broadcast_seq;
+  } __data;
+ char __size[__SIZEOF_PTHREAD_COND_T];
+ long int __align;
+} pthread_cond_t;
+
+
+/* Data structure for reader-writer lock variable handling. The
+ structure of the attribute type is deliberately not exposed. */
+typedef union
+{
+ struct __pthread_rwlock_arch_t __data;
+ char __size[__SIZEOF_PTHREAD_RWLOCK_T];
+ long int __align;
+} pthread_rwlock_t;
+
+typedef union
+{
+ char __size[__SIZEOF_PTHREAD_RWLOCKATTR_T];
+ long int __align;
+} pthread_rwlockattr_t;
+
+
+/* POSIX spinlock data type. */
+typedef volatile int pthread_spinlock_t;
+
+
+/* POSIX barriers data type. The structure of the type is
+ deliberately not exposed. */
+typedef union
+{
+ char __size[__SIZEOF_PTHREAD_BARRIER_T];
+ long int __align;
+} pthread_barrier_t;
+
+typedef union
+{
+ char __size[__SIZEOF_PTHREAD_BARRIERATTR_T];
+ int __align;
+} pthread_barrierattr_t;
+
+#endif
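
All of the public types above follow the same glibc convention: the real layout stays private, and the installed header only exposes a union of a fixed-size __size byte array plus one member that forces the required alignment. A minimal, self-contained sketch of that pattern (hypothetical type name and size, not part of the patch):

#include <assert.h>
#include <stdio.h>

/* Hypothetical opaque handle, 32 bytes, aligned like long --- the same
   trick the pthread types above use with the __SIZEOF_PTHREAD_*_T
   constants from <bits/pthreadtypes-arch.h>.  */
#define SIZEOF_MY_HANDLE_T 32

typedef union
{
  char __size[SIZEOF_MY_HANDLE_T];   /* reserves the storage */
  long int __align;                  /* forces the union's alignment */
} my_handle_t;

/* Library-internal layout; it must fit inside __size.  */
struct my_handle_internal
{
  int lock;
  unsigned int count;
  void *owner;
};

int
main (void)
{
  /* The same guarantee that ASSERT_PTHREAD_INTERNAL_SIZE in
     pthreadP_2_17.h checks for the real pthread types.  */
  assert (sizeof (struct my_handle_internal) <= sizeof (my_handle_t));
  printf ("public %zu bytes, internal %zu bytes\n",
          sizeof (my_handle_t), sizeof (struct my_handle_internal));
  return 0;
}
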
diff --git a/nptl_2_17/bits/thread-shared-types_2_17.h b/nptl_2_17/bits/thread-shared-types_2_17.h
new file mode 100644
index 00000000..c855d0d8
--- /dev/null
+++ b/nptl_2_17/bits/thread-shared-types_2_17.h
@@ -0,0 +1,186 @@
+/* Common threading primitives definitions for both POSIX and C11.
+ Copyright (C) 2017-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _THREAD_SHARED_TYPES_H
+#define _THREAD_SHARED_TYPES_H 1
+
+#include <compat_pthread_2_17.h>
+/* Arch-specific definitions. Each architecture must define the following
+ macros to define the expected sizes of pthread data types:
+
+ __SIZEOF_PTHREAD_ATTR_T - size of pthread_attr_t.
+ __SIZEOF_PTHREAD_MUTEX_T - size of pthread_mutex_t.
+ __SIZEOF_PTHREAD_MUTEXATTR_T - size of pthread_mutexattr_t.
+ __SIZEOF_PTHREAD_COND_T - size of pthread_cond_t.
+ __SIZEOF_PTHREAD_CONDATTR_T - size of pthread_condattr_t.
+ __SIZEOF_PTHREAD_RWLOCK_T - size of pthread_rwlock_t.
+ __SIZEOF_PTHREAD_RWLOCKATTR_T - size of pthread_rwlockattr_t.
+ __SIZEOF_PTHREAD_BARRIER_T - size of pthread_barrier_t.
+ __SIZEOF_PTHREAD_BARRIERATTR_T - size of pthread_barrierattr_t.
+
+ Also, the following macros must be defined for internal pthread_mutex_t
+ struct definitions (struct __pthread_mutex_s):
+
+ __PTHREAD_COMPAT_PADDING_MID - any additional members after 'kind'
+ and before '__spin' (for 64 bits) or
+ '__nusers' (for 32 bits).
+ __PTHREAD_COMPAT_PADDING_END - any additional members at the end of
+ the internal structure.
+ __PTHREAD_MUTEX_LOCK_ELISION - 1 if the architecture supports lock
+ elision or 0 otherwise.
+ __PTHREAD_MUTEX_NUSERS_AFTER_KIND - control where to put __nusers. The
+ preferred value for new architectures
+ is 0.
+ __PTHREAD_MUTEX_USE_UNION - control whether internal __spins and
+ __list will be placed inside a union for
+ linuxthreads compatibility.
+ The preferred value for new architectures
+ is 0.
+
+ For a new port the preferred values for the required defines are:
+
+ #define __PTHREAD_COMPAT_PADDING_MID
+ #define __PTHREAD_COMPAT_PADDING_END
+ #define __PTHREAD_MUTEX_LOCK_ELISION 0
+ #define __PTHREAD_MUTEX_NUSERS_AFTER_KIND 0
+ #define __PTHREAD_MUTEX_USE_UNION 0
+
+ __PTHREAD_MUTEX_LOCK_ELISION can be set to 1 if the hardware plans to
+ eventually support lock elision using transactional memory.
+
+ The additional macro defines any constraint for the lock alignment
+ inside the thread structures:
+
+ __LOCK_ALIGNMENT - for internal lock/futex usage.
+
+ Same idea but for the once locking primitive:
+
+ __ONCE_ALIGNMENT - for pthread_once_t/once_flag definition.
+
+ And finally the internal pthread_rwlock_t (struct __pthread_rwlock_arch_t)
+ must be defined.
+ */
+#include <bits/pthreadtypes-arch.h>
+
+/* Common definition of pthread_mutex_t. */
+
+#if !__PTHREAD_MUTEX_USE_UNION
+typedef struct __pthread_internal_list
+{
+ struct __pthread_internal_list *__prev;
+ struct __pthread_internal_list *__next;
+} __pthread_list_t;
+#else
+typedef struct __pthread_internal_slist
+{
+ struct __pthread_internal_slist *__next;
+} __pthread_slist_t;
+#endif
+
+/* Lock elision support. */
+#if __PTHREAD_MUTEX_LOCK_ELISION
+# if !__PTHREAD_MUTEX_USE_UNION
+# define __PTHREAD_SPINS_DATA \
+ short __spins; \
+ short __elision
+# define __PTHREAD_SPINS 0, 0
+# else
+# define __PTHREAD_SPINS_DATA \
+ struct \
+ { \
+ short __espins; \
+ short __eelision; \
+ } __elision_data
+# define __PTHREAD_SPINS { 0, 0 }
+# define __spins __elision_data.__espins
+# define __elision __elision_data.__eelision
+# endif
+#else
+# define __PTHREAD_SPINS_DATA int __spins
+/* Mutex __spins initializer used by PTHREAD_MUTEX_INITIALIZER. */
+# define __PTHREAD_SPINS 0
+#endif
+
+struct __pthread_mutex_s
+{
+ int __lock __LOCK_ALIGNMENT;
+ unsigned int __count;
+ int __owner;
+#if !__PTHREAD_MUTEX_NUSERS_AFTER_KIND
+ unsigned int __nusers;
+#endif
+ /* KIND must stay at this position in the structure to maintain
+ binary compatibility with static initializers.
+
+ Concurrency notes:
+ The __kind of a mutex is initialized either by the static
+ PTHREAD_MUTEX_INITIALIZER or by a call to pthread_mutex_init.
+
+ After a mutex has been initialized, the __kind of a mutex is usually not
+ changed. BUT it can be set to -1 in pthread_mutex_destroy or elision can
+ be enabled. This is done concurrently in the pthread_mutex_*lock functions
+ by using the macro FORCE_ELISION. This macro is only defined for
+ architectures which support lock elision.
+
+ For elision, there are the flags PTHREAD_MUTEX_ELISION_NP and
+ PTHREAD_MUTEX_NO_ELISION_NP which can be set in addition to the already set
+ type of a mutex.
+ Before a mutex is initialized, only PTHREAD_MUTEX_NO_ELISION_NP can be set
+ with pthread_mutexattr_settype.
+ After a mutex has been initialized, the functions pthread_mutex_*lock can
+ enable elision - if the mutex type and the machine support it - by setting
+ the flag PTHREAD_MUTEX_ELISION_NP. This is done concurrently. Afterwards
+ the lock / unlock functions are using specific elision code-paths. */
+ int __kind;
+ __PTHREAD_COMPAT_PADDING_MID
+#if __PTHREAD_MUTEX_NUSERS_AFTER_KIND
+ unsigned int __nusers;
+#endif
+#if !__PTHREAD_MUTEX_USE_UNION
+ __PTHREAD_SPINS_DATA;
+ __pthread_list_t __list;
+# define __PTHREAD_MUTEX_HAVE_PREV 1
+#else
+ __extension__ union
+ {
+ __PTHREAD_SPINS_DATA;
+ __pthread_slist_t __list;
+ };
+# define __PTHREAD_MUTEX_HAVE_PREV 0
+#endif
+ __PTHREAD_COMPAT_PADDING_END
+};
+
+
+/* Common definition of pthread_cond_t. */
+
+struct __pthread_cond_s
+{
+ int __lock;
+ unsigned int __futex;
+ __extension__ unsigned long long int __total_seq;
+ __extension__ unsigned long long int __wakeup_seq;
+ __extension__ unsigned long long int __woken_seq;
+ void *__mutex;
+ unsigned int __nwaiters;
+ unsigned int __broadcast_seq;
+
+  long int __align;
+};
+
+#endif /* _THREAD_SHARED_TYPES_H */
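
For an architecture that adopts the preferred defaults listed in the header comment above (empty padding macros, __PTHREAD_MUTEX_LOCK_ELISION 0, __PTHREAD_MUTEX_NUSERS_AFTER_KIND 0, __PTHREAD_MUTEX_USE_UNION 0), the macros expand struct __pthread_mutex_s to roughly the following layout. This is a hand expansion for illustration, not an additional definition, and it assumes __LOCK_ALIGNMENT is empty:

struct __pthread_mutex_s
{
  int __lock;               /* futex word (__LOCK_ALIGNMENT assumed empty) */
  unsigned int __count;     /* recursion count for recursive mutexes */
  int __owner;              /* TID of the current owner */
  unsigned int __nusers;    /* before __kind since NUSERS_AFTER_KIND is 0 */
  int __kind;               /* type and flag bits, see pthreadP_2_17.h */
  int __spins;              /* __PTHREAD_SPINS_DATA without lock elision */
  __pthread_list_t __list;  /* robust list entry; __PTHREAD_MUTEX_HAVE_PREV is 1 */
};
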
diff --git a/nptl_2_17/internaltypes_2_17.h b/nptl_2_17/internaltypes_2_17.h
new file mode 100644
index 00000000..603dc01c
--- /dev/null
+++ b/nptl_2_17/internaltypes_2_17.h
@@ -0,0 +1,179 @@
+/* Copyright (C) 2002-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _INTERNALTYPES_H
+#define _INTERNALTYPES_H 1
+
+#include <stdint.h>
+#include <atomic.h>
+#include <endian.h>
+
+
+struct pthread_attr
+{
+ /* Scheduler parameters and priority. */
+ struct sched_param schedparam;
+ int schedpolicy;
+ /* Various flags like detachstate, scope, etc. */
+ int flags;
+ /* Size of guard area. */
+ size_t guardsize;
+ /* Stack handling. */
+ void *stackaddr;
+ size_t stacksize;
+ /* Affinity map. */
+ cpu_set_t *cpuset;
+ size_t cpusetsize;
+};
+
+#define ATTR_FLAG_DETACHSTATE 0x0001
+#define ATTR_FLAG_NOTINHERITSCHED 0x0002
+#define ATTR_FLAG_SCOPEPROCESS 0x0004
+#define ATTR_FLAG_STACKADDR 0x0008
+#define ATTR_FLAG_OLDATTR 0x0010
+#define ATTR_FLAG_SCHED_SET 0x0020
+#define ATTR_FLAG_POLICY_SET 0x0040
+
+
+/* Mutex attribute data structure. */
+struct pthread_mutexattr
+{
+ /* Identifier for the kind of mutex.
+
+ Bit 31 is set if the mutex is to be shared between processes.
+
+ Bit 0 to 30 contain one of the PTHREAD_MUTEX_ values to identify
+ the type of the mutex. */
+ int mutexkind;
+};
+
+
+/* Conditional variable attribute data structure. */
+struct pthread_condattr
+{
+ /* Combination of values:
+
+ Bit 0 : flag whether conditional variable will be
+ sharable between processes.
+ Bit 1-COND_CLOCK_BITS: Clock ID. COND_CLOCK_BITS is the number of bits
+ needed to represent the ID of the clock. */
+ int value;
+};
+#define COND_CLOCK_BITS 1
+#define COND_NWAITERS_SHIFT 1
+
+/* Read-write lock variable attribute data structure. */
+struct pthread_rwlockattr
+{
+ int lockkind;
+ int pshared;
+};
+
+
+/* Barrier data structure. See pthread_barrier_wait for a description
+ of how these fields are used. */
+struct pthread_barrier
+{
+ unsigned int in;
+ unsigned int current_round;
+ unsigned int count;
+ int shared;
+ unsigned int out;
+};
+/* See pthread_barrier_wait for a description. */
+#define BARRIER_IN_THRESHOLD (UINT_MAX/2)
+
+
+/* Barrier variable attribute data structure. */
+struct pthread_barrierattr
+{
+ int pshared;
+};
+
+
+/* Thread-local data handling. */
+struct pthread_key_struct
+{
+ /* Sequence numbers. Even numbers indicate vacant entries. Note
+ that zero is even. We use uintptr_t to not require padding on
+ 32- and 64-bit machines. On 64-bit machines it helps to avoid
+ wrapping, too. */
+ uintptr_t seq;
+
+ /* Destructor for the data. */
+ void (*destr) (void *);
+};
+
+/* Check whether an entry is unused. */
+#define KEY_UNUSED(p) (((p) & 1) == 0)
+/* Check whether a key is usable. We cannot reuse an allocated key if
+ the sequence counter would overflow after the next destroy call.
+ This would mean that we potentially free memory for a key with the
+ same sequence. This is *very* unlikely to happen; a program would
+ have to create and destroy a key 2^31 times (on 32-bit platforms,
+ on 64-bit platforms that would be 2^63). If it should happen we
+ simply don't use this specific key anymore. */
+#define KEY_USABLE(p) (((uintptr_t) (p)) < ((uintptr_t) ((p) + 2)))
+
+
+/* Handling of read-write lock data. */
+// XXX For now there is only one flag. Maybe more in future.
+#define RWLOCK_RECURSIVE(rwlock) ((rwlock)->__data.__flags != 0)
+
+
+/* Semaphore variable structure. */
+struct new_sem
+{
+#if __HAVE_64B_ATOMICS
+ /* The data field holds both value (in the least-significant 32 bits) and
+ nwaiters. */
+# if __BYTE_ORDER == __LITTLE_ENDIAN
+# define SEM_VALUE_OFFSET 0
+# elif __BYTE_ORDER == __BIG_ENDIAN
+# define SEM_VALUE_OFFSET 1
+# else
+# error Unsupported byte order.
+# endif
+# define SEM_NWAITERS_SHIFT 32
+# define SEM_VALUE_MASK (~(unsigned int)0)
+ uint64_t data;
+ int private;
+ int pad;
+#else
+# define SEM_VALUE_SHIFT 1
+# define SEM_NWAITERS_MASK ((unsigned int)1)
+ unsigned int value;
+ int private;
+ int pad;
+ unsigned int nwaiters;
+#endif
+};
+
+struct old_sem
+{
+ unsigned int value;
+};
+
+
+/* Compatibility type for old conditional variable interfaces. */
+typedef struct
+{
+ pthread_cond_t *cond;
+} pthread_cond_2_0_t;
+
+#endif /* internaltypes.h */
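
The KEY_UNUSED and KEY_USABLE comments above encode vacancy in the low bit of the sequence counter and refuse to reuse a key whose counter would wrap. A small standalone illustration using the same two macros (example values only):

#include <stdint.h>
#include <stdio.h>

#define KEY_UNUSED(p) (((p) & 1) == 0)
#define KEY_USABLE(p) (((uintptr_t) (p)) < ((uintptr_t) ((p) + 2)))

int
main (void)
{
  uintptr_t seq = 0;                /* even: entry is vacant and usable */
  printf ("%d %d\n", (int) KEY_UNUSED (seq), (int) KEY_USABLE (seq)); /* 1 1 */

  seq += 1;                         /* odd: entry is in use */
  printf ("%d %d\n", (int) KEY_UNUSED (seq), (int) KEY_USABLE (seq)); /* 0 1 */

  seq = UINTPTR_MAX - 1;            /* even, but seq + 2 would wrap around */
  printf ("%d %d\n", (int) KEY_UNUSED (seq), (int) KEY_USABLE (seq)); /* 1 0 */
  return 0;
}
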
diff --git a/nptl_2_17/kernel-features_2_17.h b/nptl_2_17/kernel-features_2_17.h
new file mode 100644
index 00000000..299ae0a1
--- /dev/null
+++ b/nptl_2_17/kernel-features_2_17.h
@@ -0,0 +1,162 @@
+/* Set flags signalling availability of kernel features based on given
+ kernel version number.
+ Copyright (C) 1999-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* This file must not contain any C code. At least it must be protected
+ to allow using the file also in assembler files. */
+
+#ifndef __LINUX_KERNEL_VERSION_2_17
+/* We assume the worst; all kernels should be supported. */
+# define __LINUX_KERNEL_VERSION_2_17 0
+#endif
+
+/* We assume for __LINUX_KERNEL_VERSION the same encoding used in
+ linux/version.h. I.e., the major, minor, and subminor all get a
+ byte with the major number being in the highest byte. This means
+ we can do numeric comparisons.
+
+ In the following we will define certain symbols depending on
+ whether the described kernel feature is available in the kernel
+ version given by __LINUX_KERNEL_VERSION. We are not always exactly
+ recording the correct versions in which the features were
+ introduced. If somebody cares these values can afterwards be
+ corrected. */
+
+/* Some architectures use the socketcall multiplexer for some or all
+ socket-related operations instead of separate syscalls.
+ __ASSUME_SOCKETCALL is defined for such architectures. */
+
+/* The changed st_ino field appeared in 2.4.0-test6. However, SH is lame,
+ and still does not have a 64-bit inode field. */
+#define __ASSUME_ST_INO_64_BIT 1
+
+/* The statfs64 syscalls are available in 2.5.74 (but not for alpha). */
+#define __ASSUME_STATFS64 1
+
+/* pselect/ppoll were introduced just after 2.6.16-rc1. On x86_64 and
+ SH this appeared first in 2.6.19-rc1, on ia64 in 2.6.22-rc1. */
+#define __ASSUME_PSELECT 1
+
+/* The *at syscalls were introduced just after 2.6.16-rc1. On PPC
+ they were introduced in 2.6.17-rc1, on SH in 2.6.19-rc1. */
+#define __ASSUME_ATFCTS 1
+
+/* Support for inter-process robust mutexes was added in 2.6.17 (but
+ some architectures lack futex_atomic_cmpxchg_inatomic in some
+ configurations). */
+#define __ASSUME_SET_ROBUST_LIST 1
+
+/* Support for various CLOEXEC and NONBLOCK flags was added in
+ 2.6.27. */
+#define __ASSUME_IN_NONBLOCK 1
+
+/* Support for the FUTEX_CLOCK_REALTIME flag was added in 2.6.29. */
+#define __ASSUME_FUTEX_CLOCK_REALTIME 1
+
+/* Support for preadv and pwritev was added in 2.6.30. */
+#define __ASSUME_PREADV 1
+#define __ASSUME_PWRITEV 1
+
+
+/* Support for FUTEX_*_REQUEUE_PI was added in 2.6.31 (but some
+ * architectures lack futex_atomic_cmpxchg_inatomic in some
+ * configurations). */
+#define __ASSUME_REQUEUE_PI 1
+
+/* Support for sendmmsg functionality was added in 3.0. */
+#define __ASSUME_SENDMMSG 1
+
+/* On most architectures, most socket syscalls are supported for all
+ supported kernel versions, but on some socketcall architectures
+ separate syscalls were only added later. */
+#define __ASSUME_SENDMSG_SYSCALL 1
+#define __ASSUME_RECVMSG_SYSCALL 1
+#define __ASSUME_ACCEPT_SYSCALL 1
+#define __ASSUME_CONNECT_SYSCALL 1
+#define __ASSUME_RECVFROM_SYSCALL 1
+#define __ASSUME_SENDTO_SYSCALL 1
+#define __ASSUME_ACCEPT4_SYSCALL 1
+#define __ASSUME_RECVMMSG_SYSCALL 1
+#define __ASSUME_SENDMMSG_SYSCALL 1
+
+/* Support for SysV IPC through wired syscalls. All supported architectures
+ either support the ipc syscall and/or all the corresponding ipc syscalls.
+#define __ASSUME_DIRECT_SYSVIPC_SYSCALLS 1
+
+/* Support for p{read,write}v2 was added in 4.6. However, the Linux default
+ implementation does not rely on the __ASSUME_* macro and instead uses a
+ fallback implementation based on p{read,write}v, returning an error for
+ unsupported flags. */
+
+/* Support for the renameat2 system call was added in kernel 3.15. */
+#if __LINUX_KERNEL_VERSION >= 0x030F00
+# define __ASSUME_RENAMEAT2
+#endif
+
+/* Support for the execveat syscall was added in 3.19. */
+#if __LINUX_KERNEL_VERSION >= 0x031300
+# define __ASSUME_EXECVEAT 1
+#endif
+
+#if __LINUX_KERNEL_VERSION >= 0x040400
+# define __ASSUME_MLOCK2 1
+#endif
+
+#if __LINUX_KERNEL_VERSION >= 0x040500
+# define __ASSUME_COPY_FILE_RANGE 1
+#endif
+
+/* Support for statx was added in kernel 4.11. */
+#if __LINUX_KERNEL_VERSION >= 0x040B00
+# define __ASSUME_STATX 1
+#endif
+
+/* Support for clone call used on fork. The signature varies across the
+ architectures, with four different variants currently in use:
+
+ 1. long int clone (unsigned long flags, unsigned long newsp,
+ int *parent_tidptr, unsigned long tls,
+ int *child_tidptr)
+
+ 2. long int clone (unsigned long newsp, unsigned long clone_flags,
+ int *parent_tidptr, int * child_tidptr,
+ unsigned long tls)
+
+ 3. long int clone (unsigned long flags, unsigned long newsp,
+ int stack_size, int *parent_tidptr,
+ int *child_tidptr, unsigned long tls)
+
+ 4. long int clone (unsigned long flags, unsigned long newsp,
+ int *parent_tidptr, int *child_tidptr,
+ unsigned long tls)
+
+ The fourth variant is intended to be used as the default for newer ports.
+ IA64 also uses the third variant, but with __NR_clone2 instead of
+ __NR_clone.
+
+ The macro names defining the variant used for the architecture are
+ similar to the kernel's:
+
+ - __ASSUME_CLONE_BACKWARDS: for variant 1.
+ - __ASSUME_CLONE_BACKWARDS2: for variant 2 (s390).
+ - __ASSUME_CLONE_BACKWARDS3: for variant 3 (microblaze).
+ - __ASSUME_CLONE_DEFAULT: for variant 4.
+ - __ASSUME_CLONE2: for clone2 with variant 3 (ia64).
+ */
+
+#define __ASSUME_CLONE_DEFAULT 1
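
The version guards in this header rely on the linux/version.h encoding described near the top of the file: one byte per component, major in the highest byte, so plain numeric comparisons work. A quick standalone check of one constant used above (the helper macro name here is made up for the example):

/* Mirrors the KERNEL_VERSION encoding: one byte each for major, minor
   and patch level, major in the highest byte.  */
#define ENC_KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

/* 3.15.0 encodes to 0x030F00, the guard used above for __ASSUME_RENAMEAT2,
   so any kernel version >= 3.15 compares greater or equal numerically.  */
_Static_assert (ENC_KERNEL_VERSION (3, 15, 0) == 0x030F00,
                "renameat2 guard matches kernel 3.15.0");
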
diff --git a/nptl_2_17/pthreadP_2_17.h b/nptl_2_17/pthreadP_2_17.h
new file mode 100644
index 00000000..3050fa54
--- /dev/null
+++ b/nptl_2_17/pthreadP_2_17.h
@@ -0,0 +1,714 @@
+/* Copyright (C) 2002-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _PTHREADP_H
+#define _PTHREADP_H 1
+
+
+#include <compat_pthread_2_17.h>
+#include "kernel-features_2_17.h"
+#include "pthread_2_17.h"
+#include "internaltypes_2_17.h"
+
+#include <setjmp.h>
+#include <stdbool.h>
+#include <sys/syscall.h>
+#include "descr.h"
+#include <tls.h>
+#include <lowlevellock.h>
+#include <stackinfo.h>
+#include <pthread-functions_2_17.h>
+#include <atomic.h>
+#include <errno.h>
+#include <internal-signals.h>
+
+/* Atomic operations on TLS memory. */
+#ifndef THREAD_ATOMIC_CMPXCHG_VAL
+# define THREAD_ATOMIC_CMPXCHG_VAL(descr, member, new, old) \
+ atomic_compare_and_exchange_val_acq (&(descr)->member, new, old)
+#endif
+
+#ifndef THREAD_ATOMIC_BIT_SET
+# define THREAD_ATOMIC_BIT_SET(descr, member, bit) \
+ atomic_bit_set (&(descr)->member, bit)
+#endif
+
+
+/* Adaptive mutex definitions. */
+#ifndef MAX_ADAPTIVE_COUNT
+# define MAX_ADAPTIVE_COUNT 100
+#endif
+
+
+/* Magic cookie representing robust mutex with dead owner. */
+#define PTHREAD_MUTEX_INCONSISTENT INT_MAX
+/* Magic cookie representing not recoverable robust mutex. */
+#define PTHREAD_MUTEX_NOTRECOVERABLE (INT_MAX - 1)
+
+
+/* Internal mutex type value. */
+enum
+{
+ PTHREAD_MUTEX_KIND_MASK_NP = 3,
+
+ PTHREAD_MUTEX_ELISION_NP = 256,
+ PTHREAD_MUTEX_NO_ELISION_NP = 512,
+
+ PTHREAD_MUTEX_ROBUST_NORMAL_NP = 16,
+ PTHREAD_MUTEX_ROBUST_RECURSIVE_NP
+ = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_RECURSIVE_NP,
+ PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP
+ = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
+ PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP
+ = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_ADAPTIVE_NP,
+ PTHREAD_MUTEX_PRIO_INHERIT_NP = 32,
+ PTHREAD_MUTEX_PI_NORMAL_NP
+ = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_NORMAL,
+ PTHREAD_MUTEX_PI_RECURSIVE_NP
+ = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_RECURSIVE_NP,
+ PTHREAD_MUTEX_PI_ERRORCHECK_NP
+ = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
+ PTHREAD_MUTEX_PI_ADAPTIVE_NP
+ = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ADAPTIVE_NP,
+ PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP
+ = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_NORMAL_NP,
+ PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP
+ = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_RECURSIVE_NP,
+ PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP
+ = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP,
+ PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP
+ = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP,
+ PTHREAD_MUTEX_PRIO_PROTECT_NP = 64,
+ PTHREAD_MUTEX_PP_NORMAL_NP
+ = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_NORMAL,
+ PTHREAD_MUTEX_PP_RECURSIVE_NP
+ = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_RECURSIVE_NP,
+ PTHREAD_MUTEX_PP_ERRORCHECK_NP
+ = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
+ PTHREAD_MUTEX_PP_ADAPTIVE_NP
+ = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_ADAPTIVE_NP,
+ PTHREAD_MUTEX_ELISION_FLAGS_NP
+ = PTHREAD_MUTEX_ELISION_NP | PTHREAD_MUTEX_NO_ELISION_NP,
+
+ PTHREAD_MUTEX_TIMED_ELISION_NP =
+ PTHREAD_MUTEX_TIMED_NP | PTHREAD_MUTEX_ELISION_NP,
+ PTHREAD_MUTEX_TIMED_NO_ELISION_NP =
+ PTHREAD_MUTEX_TIMED_NP | PTHREAD_MUTEX_NO_ELISION_NP,
+};
+#define PTHREAD_MUTEX_PSHARED_BIT 128
+
+/* See concurrency notes regarding __kind in struct __pthread_mutex_s
+ in sysdeps/nptl/bits/thread-shared-types.h. */
+#define PTHREAD_MUTEX_TYPE(m) \
+ (atomic_load_relaxed (&((m)->__data.__kind)) & 127)
+/* Don't include NO_ELISION, as that type is always the same
+ as the underlying lock type. */
+#define PTHREAD_MUTEX_TYPE_ELISION(m) \
+ (atomic_load_relaxed (&((m)->__data.__kind)) \
+ & (127 | PTHREAD_MUTEX_ELISION_NP))
+
+#if LLL_PRIVATE == 0 && LLL_SHARED == 128
+# define PTHREAD_MUTEX_PSHARED(m) \
+ (atomic_load_relaxed (&((m)->__data.__kind)) & 128)
+#else
+# define PTHREAD_MUTEX_PSHARED(m) \
+ ((atomic_load_relaxed (&((m)->__data.__kind)) & 128) \
+ ? LLL_SHARED : LLL_PRIVATE)
+#endif
+
+/* The kernel when waking robust mutexes on exit never uses
+ FUTEX_PRIVATE_FLAG FUTEX_WAKE. */
+#define PTHREAD_ROBUST_MUTEX_PSHARED(m) LLL_SHARED
+
+/* Ceiling in __data.__lock. __data.__lock is signed, so don't
+ use the MSB bit in there, but in the mask also include that bit,
+ so that the compiler can optimize & PTHREAD_MUTEX_PRIO_CEILING_MASK
+ masking if the value is then shifted down by
+ PTHREAD_MUTEX_PRIO_CEILING_SHIFT. */
+#define PTHREAD_MUTEX_PRIO_CEILING_SHIFT 19
+#define PTHREAD_MUTEX_PRIO_CEILING_MASK 0xfff80000
+
+
+/* Flags in mutex attr. */
+#define PTHREAD_MUTEXATTR_PROTOCOL_SHIFT 28
+#define PTHREAD_MUTEXATTR_PROTOCOL_MASK 0x30000000
+#define PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT 12
+#define PTHREAD_MUTEXATTR_PRIO_CEILING_MASK 0x00fff000
+#define PTHREAD_MUTEXATTR_FLAG_ROBUST 0x40000000
+#define PTHREAD_MUTEXATTR_FLAG_PSHARED 0x80000000
+#define PTHREAD_MUTEXATTR_FLAG_BITS \
+ (PTHREAD_MUTEXATTR_FLAG_ROBUST | PTHREAD_MUTEXATTR_FLAG_PSHARED \
+ | PTHREAD_MUTEXATTR_PROTOCOL_MASK | PTHREAD_MUTEXATTR_PRIO_CEILING_MASK)
+
+
+/* For the following, see pthread_rwlock_common.c. */
+#define PTHREAD_RWLOCK_WRPHASE 1
+#define PTHREAD_RWLOCK_WRLOCKED 2
+#define PTHREAD_RWLOCK_RWAITING 4
+#define PTHREAD_RWLOCK_READER_SHIFT 3
+#define PTHREAD_RWLOCK_READER_OVERFLOW ((unsigned int) 1 \
+ << (sizeof (unsigned int) * 8 - 1))
+#define PTHREAD_RWLOCK_WRHANDOVER ((unsigned int) 1 \
+ << (sizeof (unsigned int) * 8 - 1))
+#define PTHREAD_RWLOCK_FUTEX_USED 2
+
+
+/* Bits used in robust mutex implementation. */
+#define FUTEX_WAITERS 0x80000000
+#define FUTEX_OWNER_DIED 0x40000000
+#define FUTEX_TID_MASK 0x3fffffff
+
+
+/* pthread_once definitions. See __pthread_once for how these are used. */
+#define __PTHREAD_ONCE_INPROGRESS 1
+#define __PTHREAD_ONCE_DONE 2
+#define __PTHREAD_ONCE_FORK_GEN_INCR 4
+
+/* Attribute to indicate thread creation was issued from C11 thrd_create. */
+#define ATTR_C11_THREAD ((void*)(uintptr_t)-1)
+
+#if 0
+/* Condition variable definitions. See __pthread_cond_wait_common.
+ Need to be defined here so there is one place from which
+ nptl_lock_constants can grab them. */
+#define __PTHREAD_COND_CLOCK_MONOTONIC_MASK 2
+#define __PTHREAD_COND_SHARED_MASK 1
+#endif
+
+/* Internal variables. */
+
+
+/* Default pthread attributes. */
+extern struct pthread_attr __default_pthread_attr attribute_hidden;
+extern int __default_pthread_attr_lock attribute_hidden;
+
+/* Size and alignment of static TLS block. */
+extern size_t __static_tls_size attribute_hidden;
+extern size_t __static_tls_align_m1 attribute_hidden;
+
+/* Flag whether the machine is SMP or not. */
+extern int __is_smp attribute_hidden;
+
+/* Thread descriptor handling. */
+extern list_t __stack_user;
+hidden_proto (__stack_user)
+
+/* Attribute handling. */
+extern struct pthread_attr *__attr_list attribute_hidden;
+extern int __attr_list_lock attribute_hidden;
+
+/* Concurrency handling. */
+extern int __concurrency_level attribute_hidden;
+
+/* Thread-local data key handling. */
+extern struct pthread_key_struct __pthread_keys[PTHREAD_KEYS_MAX];
+hidden_proto (__pthread_keys)
+
+/* Number of threads running. */
+extern unsigned int __nptl_nthreads attribute_hidden;
+
+#ifndef __ASSUME_SET_ROBUST_LIST
+/* Negative if we do not have the system call and thus cannot use it. */
+extern int __set_robust_list_avail attribute_hidden;
+#endif
+
+/* Thread Priority Protection. */
+extern int __sched_fifo_min_prio attribute_hidden;
+extern int __sched_fifo_max_prio attribute_hidden;
+extern void __init_sched_fifo_prio (void) attribute_hidden;
+extern int __pthread_tpp_change_priority (int prev_prio, int new_prio)
+ attribute_hidden;
+extern int __pthread_current_priority (void) attribute_hidden;
+
+/* The library can run in debugging mode where it performs a lot more
+ tests. */
+extern int __pthread_debug attribute_hidden;
+/** For now disable debugging support. */
+#if 0
+# define DEBUGGING_P __builtin_expect (__pthread_debug, 0)
+# define INVALID_TD_P(pd) (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
+# define INVALID_NOT_TERMINATED_TD_P(pd) INVALID_TD_P (pd)
+#else
+# define DEBUGGING_P 0
+/* Simplified test. This will not catch all invalid descriptors but
+ is better than nothing. And if the test triggers the thread
+ descriptor is guaranteed to be invalid. */
+# define INVALID_TD_P(pd) __builtin_expect ((pd)->tid <= 0, 0)
+# define INVALID_NOT_TERMINATED_TD_P(pd) __builtin_expect ((pd)->tid < 0, 0)
+#endif
+
+
+/* Cancellation test. */
+#define CANCELLATION_P(self) \
+ do { \
+ int cancelhandling = THREAD_GETMEM (self, cancelhandling); \
+ if (CANCEL_ENABLED_AND_CANCELED (cancelhandling)) \
+ { \
+ THREAD_SETMEM (self, result, PTHREAD_CANCELED); \
+ __do_cancel (); \
+ } \
+ } while (0)
+
+
+extern void __pthread_unwind (__pthread_unwind_buf_t *__buf)
+ __cleanup_fct_attribute __attribute ((__noreturn__))
+ weak_function;
+extern void __pthread_unwind_next (__pthread_unwind_buf_t *__buf)
+ __cleanup_fct_attribute __attribute ((__noreturn__))
+ weak_function;
+extern void __pthread_register_cancel (__pthread_unwind_buf_t *__buf)
+ __cleanup_fct_attribute;
+extern void __pthread_unregister_cancel (__pthread_unwind_buf_t *__buf)
+ __cleanup_fct_attribute;
+hidden_proto (__pthread_unwind)
+hidden_proto (__pthread_unwind_next)
+hidden_proto (__pthread_register_cancel)
+hidden_proto (__pthread_unregister_cancel)
+# ifdef SHARED
+extern void attribute_hidden pthread_cancel_init (void);
+# endif
+extern void __nptl_unwind_freeres (void) attribute_hidden;
+
+
+/* Called when a thread reacts on a cancellation request. */
+static inline void
+__attribute ((noreturn, always_inline))
+__do_cancel (void)
+{
+ struct pthread *self = THREAD_SELF;
+
+ /* Make sure we get no more cancellations. */
+ THREAD_ATOMIC_BIT_SET (self, cancelhandling, EXITING_BIT);
+
+ __pthread_unwind ((__pthread_unwind_buf_t *)
+ THREAD_GETMEM (self, cleanup_jmp_buf));
+}
+
+
+/* Set cancellation mode to asynchronous. */
+#define CANCEL_ASYNC() \
+ __pthread_enable_asynccancel ()
+/* Reset to previous cancellation mode. */
+#define CANCEL_RESET(oldtype) \
+ __pthread_disable_asynccancel (oldtype)
+
+# undef LIBC_CANCEL_ASYNC
+# define LIBC_CANCEL_ASYNC() CANCEL_ASYNC ()
+
+# undef LIBC_CANCEL_RESET
+# define LIBC_CANCEL_RESET(val) CANCEL_RESET (val)
+
+# define LIBC_CANCEL_HANDLED() \
+ __asm (".globl " __SYMBOL_PREFIX "__pthread_enable_asynccancel"); \
+ __asm (".globl " __SYMBOL_PREFIX "__pthread_disable_asynccancel")
+
+
+/* Internal prototypes. */
+
+/* Thread list handling. */
+extern struct pthread *__find_in_stack_list (struct pthread *pd)
+ attribute_hidden;
+
+/* Deallocate a thread's stack after optionally making sure the thread
+ descriptor is still valid. */
+extern void __free_tcb (struct pthread *pd) attribute_hidden;
+
+/* Free allocated stack. */
+extern void __deallocate_stack (struct pthread *pd) attribute_hidden;
+
+/* Mark all the stacks except for the current one as available. This
+ function also re-initializes the lock for the stack cache. */
+extern void __reclaim_stacks (void) attribute_hidden;
+
+/* Make all threads' stacks executable. */
+extern int __make_stacks_executable (void **stack_endp) attribute_hidden;
+
+/* longjmp handling. */
+extern void __pthread_cleanup_upto (__jmp_buf target, char *targetframe);
+hidden_proto (__pthread_cleanup_upto)
+
+
+/* Functions with versioned interfaces. */
+extern int __pthread_create_2_1 (pthread_t *newthread,
+ const pthread_attr_t *attr,
+ void *(*start_routine) (void *), void *arg);
+extern int __pthread_create_2_0 (pthread_t *newthread,
+ const pthread_attr_t *attr,
+ void *(*start_routine) (void *), void *arg);
+extern int __pthread_attr_init_2_1 (pthread_attr_t *attr);
+extern int __pthread_attr_init_2_0 (pthread_attr_t *attr);
+
+
+/* Event handlers for libthread_db interface. */
+extern void __nptl_create_event (void);
+extern void __nptl_death_event (void);
+hidden_proto (__nptl_create_event)
+hidden_proto (__nptl_death_event)
+
+/* Register the generation counter in the libpthread with the libc. */
+#ifdef TLS_MULTIPLE_THREADS_IN_TCB
+extern void __libc_pthread_init (unsigned long int *ptr,
+ void (*reclaim) (void),
+ const struct pthread_functions *functions);
+#else
+extern int *__libc_pthread_init (unsigned long int *ptr,
+ void (*reclaim) (void),
+ const struct pthread_functions *functions);
+
+/* Variable set to a nonzero value either if more than one thread runs or ran,
+ or if a single-threaded process is trying to cancel itself. See
+ nptl/descr.h for more context on the single-threaded process case. */
+extern int __pthread_multiple_threads attribute_hidden;
+/* Pointer to the corresponding variable in libc. */
+extern int *__libc_multiple_threads_ptr attribute_hidden;
+#endif
+
+/* Find a thread given its TID. */
+extern struct pthread *__find_thread_by_id (pid_t tid) attribute_hidden
+#ifdef SHARED
+;
+#else
+weak_function;
+#define __find_thread_by_id(tid) \
+ (__find_thread_by_id ? (__find_thread_by_id) (tid) : (struct pthread *) NULL)
+#endif
+
+extern void __pthread_init_static_tls (struct link_map *) attribute_hidden;
+
+extern size_t __pthread_get_minstack (const pthread_attr_t *attr);
+
+/* Namespace-safe aliases. */
+extern int __pthread_getschedparam (pthread_t thread_id, int *policy,
+ struct sched_param *param);
+extern int __pthread_setschedparam (pthread_t thread_id, int policy,
+ const struct sched_param *param);
+extern int __pthread_setcancelstate (int state, int *oldstate);
+extern int __pthread_mutex_init (pthread_mutex_t *__mutex,
+ const pthread_mutexattr_t *__mutexattr);
+extern int __pthread_mutex_destroy (pthread_mutex_t *__mutex);
+extern int __pthread_mutex_trylock (pthread_mutex_t *_mutex);
+extern int __pthread_mutex_lock (pthread_mutex_t *__mutex);
+extern int __pthread_mutex_timedlock (pthread_mutex_t *__mutex,
+ const struct timespec *__abstime);
+extern int __pthread_mutex_cond_lock (pthread_mutex_t *__mutex)
+ attribute_hidden;
+extern void __pthread_mutex_cond_lock_adjust (pthread_mutex_t *__mutex)
+ attribute_hidden;
+extern int __pthread_mutex_unlock (pthread_mutex_t *__mutex);
+extern int __pthread_mutex_unlock_usercnt (pthread_mutex_t *__mutex,
+ int __decr) attribute_hidden;
+extern int __pthread_mutexattr_init (pthread_mutexattr_t *attr);
+extern int __pthread_mutexattr_destroy (pthread_mutexattr_t *attr);
+extern int __pthread_mutexattr_settype (pthread_mutexattr_t *attr, int kind);
+extern int __pthread_attr_destroy (pthread_attr_t *attr);
+extern int __pthread_attr_getdetachstate (const pthread_attr_t *attr,
+ int *detachstate);
+extern int __pthread_attr_setdetachstate (pthread_attr_t *attr,
+ int detachstate);
+extern int __pthread_attr_getinheritsched (const pthread_attr_t *attr,
+ int *inherit);
+extern int __pthread_attr_setinheritsched (pthread_attr_t *attr, int inherit);
+extern int __pthread_attr_getschedparam (const pthread_attr_t *attr,
+ struct sched_param *param);
+extern int __pthread_attr_setschedparam (pthread_attr_t *attr,
+ const struct sched_param *param);
+extern int __pthread_attr_getschedpolicy (const pthread_attr_t *attr,
+ int *policy);
+extern int __pthread_attr_setschedpolicy (pthread_attr_t *attr, int policy);
+extern int __pthread_attr_getscope (const pthread_attr_t *attr, int *scope);
+extern int __pthread_attr_setscope (pthread_attr_t *attr, int scope);
+extern int __pthread_attr_getstackaddr (const pthread_attr_t *__restrict
+ __attr, void **__restrict __stackaddr);
+extern int __pthread_attr_setstackaddr (pthread_attr_t *__attr,
+ void *__stackaddr);
+extern int __pthread_attr_getstacksize (const pthread_attr_t *__restrict
+ __attr,
+ size_t *__restrict __stacksize);
+extern int __pthread_attr_setstacksize (pthread_attr_t *__attr,
+ size_t __stacksize);
+extern int __pthread_attr_getstack (const pthread_attr_t *__restrict __attr,
+ void **__restrict __stackaddr,
+ size_t *__restrict __stacksize);
+extern int __pthread_attr_setstack (pthread_attr_t *__attr, void *__stackaddr,
+ size_t __stacksize);
+extern int __pthread_rwlock_init (pthread_rwlock_t *__restrict __rwlock,
+ const pthread_rwlockattr_t *__restrict
+ __attr);
+extern int __pthread_rwlock_destroy (pthread_rwlock_t *__rwlock);
+extern int __pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock);
+extern int __pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock);
+extern int __pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock);
+extern int __pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock);
+extern int __pthread_rwlock_unlock (pthread_rwlock_t *__rwlock);
+extern int __pthread_cond_broadcast (pthread_cond_t *cond);
+extern int __pthread_cond_destroy (pthread_cond_t *cond);
+extern int __pthread_cond_init (pthread_cond_t *cond,
+ const pthread_condattr_t *cond_attr);
+extern int __pthread_cond_signal (pthread_cond_t *cond);
+extern int __pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex);
+extern int __pthread_cond_timedwait (pthread_cond_t *cond,
+ pthread_mutex_t *mutex,
+ const struct timespec *abstime);
+extern int __pthread_cond_clockwait (pthread_cond_t *cond,
+ pthread_mutex_t *mutex,
+ clockid_t clockid,
+ const struct timespec *abstime)
+ __nonnull ((1, 2, 4));
+extern int __pthread_condattr_destroy (pthread_condattr_t *attr);
+extern int __pthread_condattr_init (pthread_condattr_t *attr);
+extern int __pthread_key_create (pthread_key_t *key, void (*destr) (void *));
+extern int __pthread_key_delete (pthread_key_t key);
+extern void *__pthread_getspecific (pthread_key_t key);
+extern int __pthread_setspecific (pthread_key_t key, const void *value);
+extern int __pthread_once (pthread_once_t *once_control,
+ void (*init_routine) (void));
+extern int __pthread_atfork (void (*prepare) (void), void (*parent) (void),
+ void (*child) (void));
+extern pthread_t __pthread_self (void);
+extern int __pthread_equal (pthread_t thread1, pthread_t thread2);
+extern int __pthread_detach (pthread_t th);
+extern int __pthread_cancel (pthread_t th);
+extern int __pthread_kill (pthread_t threadid, int signo);
+extern void __pthread_exit (void *value) __attribute__ ((__noreturn__));
+extern int __pthread_join (pthread_t threadid, void **thread_return);
+extern int __pthread_setcanceltype (int type, int *oldtype);
+extern int __pthread_enable_asynccancel (void) attribute_hidden;
+extern void __pthread_disable_asynccancel (int oldtype) attribute_hidden;
+extern void __pthread_testcancel (void);
+extern int __pthread_timedjoin_ex (pthread_t, void **, const struct timespec *,
+ bool);
+
+hidden_proto (__pthread_mutex_init)
+hidden_proto (__pthread_mutex_destroy)
+hidden_proto (__pthread_mutex_lock)
+hidden_proto (__pthread_mutex_trylock)
+hidden_proto (__pthread_mutex_unlock)
+hidden_proto (__pthread_rwlock_rdlock)
+hidden_proto (__pthread_rwlock_wrlock)
+hidden_proto (__pthread_rwlock_unlock)
+hidden_proto (__pthread_key_create)
+hidden_proto (__pthread_getspecific)
+hidden_proto (__pthread_setspecific)
+hidden_proto (__pthread_once)
+hidden_proto (__pthread_setcancelstate)
+hidden_proto (__pthread_testcancel)
+hidden_proto (__pthread_mutexattr_init)
+hidden_proto (__pthread_mutexattr_settype)
+hidden_proto (__pthread_timedjoin_ex)
+
+extern int __pthread_cond_broadcast_2_0 (pthread_cond_2_0_t *cond);
+extern int __pthread_cond_destroy_2_0 (pthread_cond_2_0_t *cond);
+extern int __pthread_cond_init_2_0 (pthread_cond_2_0_t *cond,
+ const pthread_condattr_t *cond_attr);
+extern int __pthread_cond_signal_2_0 (pthread_cond_2_0_t *cond);
+extern int __pthread_cond_timedwait_2_0 (pthread_cond_2_0_t *cond,
+ pthread_mutex_t *mutex,
+ const struct timespec *abstime);
+extern int __pthread_cond_wait_2_0 (pthread_cond_2_0_t *cond,
+ pthread_mutex_t *mutex);
+
+extern int __pthread_getaffinity_np (pthread_t th, size_t cpusetsize,
+ cpu_set_t *cpuset);
+
+/* The two functions are in libc.so and not exported. */
+extern int __libc_enable_asynccancel (void) attribute_hidden;
+extern void __libc_disable_asynccancel (int oldtype) attribute_hidden;
+
+
+/* The two functions are in librt.so and not exported. */
+extern int __librt_enable_asynccancel (void) attribute_hidden;
+extern void __librt_disable_asynccancel (int oldtype) attribute_hidden;
+
+/* Special versions which use non-exported functions. */
+extern void __pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
+ void (*routine) (void *), void *arg)
+ attribute_hidden;
+
+/* Replace cleanup macros defined in <pthread.h> with internal
+ versions that don't depend on unwind info and better support
+ cancellation. */
+# undef pthread_cleanup_push
+# define pthread_cleanup_push(routine,arg) \
+ { struct _pthread_cleanup_buffer _buffer; \
+ __pthread_cleanup_push (&_buffer, (routine), (arg));
+
+extern void __pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer,
+ int execute) attribute_hidden;
+# undef pthread_cleanup_pop
+# define pthread_cleanup_pop(execute) \
+ __pthread_cleanup_pop (&_buffer, (execute)); }
+
+# if defined __EXCEPTIONS && !defined __cplusplus
+/* Structure to hold the cleanup handler information. */
+struct __pthread_cleanup_combined_frame
+{
+ void (*__cancel_routine) (void *);
+ void *__cancel_arg;
+ int __do_it;
+ struct _pthread_cleanup_buffer __buffer;
+};
+
+/* Special cleanup macros which register cleanup both using
+ __pthread_cleanup_{push,pop} and using cleanup attribute. This is needed
+ for pthread_once, so that it supports both throwing exceptions from the
+ pthread_once callback (only cleanup attribute works there) and cancellation
+ of the thread running the callback if the callback or some routines it
+ calls don't have unwind information. */
+
+static __always_inline void
+__pthread_cleanup_combined_routine (struct __pthread_cleanup_combined_frame
+ *__frame)
+{
+ if (__frame->__do_it)
+ {
+ __frame->__cancel_routine (__frame->__cancel_arg);
+ __frame->__do_it = 0;
+ __pthread_cleanup_pop (&__frame->__buffer, 0);
+ }
+}
+
+static inline void
+__pthread_cleanup_combined_routine_voidptr (void *__arg)
+{
+ struct __pthread_cleanup_combined_frame *__frame
+ = (struct __pthread_cleanup_combined_frame *) __arg;
+ if (__frame->__do_it)
+ {
+ __frame->__cancel_routine (__frame->__cancel_arg);
+ __frame->__do_it = 0;
+ }
+}
+
+# define pthread_cleanup_combined_push(routine, arg) \
+ do { \
+ void (*__cancel_routine) (void *) = (routine); \
+ struct __pthread_cleanup_combined_frame __clframe \
+ __attribute__ ((__cleanup__ (__pthread_cleanup_combined_routine))) \
+ = { .__cancel_routine = __cancel_routine, .__cancel_arg = (arg), \
+ .__do_it = 1 }; \
+ __pthread_cleanup_push (&__clframe.__buffer, \
+ __pthread_cleanup_combined_routine_voidptr, \
+ &__clframe);
+
+# define pthread_cleanup_combined_pop(execute) \
+ __pthread_cleanup_pop (&__clframe.__buffer, 0); \
+ __clframe.__do_it = 0; \
+ if (execute) \
+ __cancel_routine (__clframe.__cancel_arg); \
+ } while (0)
+
+# endif
+
+extern void __pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
+ void (*routine) (void *), void *arg);
+extern void __pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
+ int execute);
+
+/* Old cleanup interfaces, still used in libc.so. */
+extern void _pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
+ void (*routine) (void *), void *arg);
+extern void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer,
+ int execute);
+extern void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
+ void (*routine) (void *), void *arg);
+extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
+ int execute);
+
+extern void __nptl_deallocate_tsd (void) attribute_hidden;
+
+extern void __nptl_setxid_error (struct xid_command *cmdp, int error)
+ attribute_hidden;
+extern int __nptl_setxid (struct xid_command *cmdp) attribute_hidden;
+#ifndef SHARED
+extern void __nptl_set_robust (struct pthread *self);
+#endif
+
+extern void __nptl_stacks_freeres (void) attribute_hidden;
+extern void __shm_directory_freeres (void) attribute_hidden;
+
+extern void __wait_lookup_done (void) attribute_hidden;
+
+#ifdef SHARED
+# define PTHREAD_STATIC_FN_REQUIRE(name)
+#else
+# define PTHREAD_STATIC_FN_REQUIRE(name) __asm (".globl " #name);
+#endif
+
+/* Test if the mutex is suitable for the FUTEX_WAIT_REQUEUE_PI operation. */
+#if (defined lll_futex_wait_requeue_pi \
+ && defined __ASSUME_REQUEUE_PI)
+# define USE_REQUEUE_PI(mut) \
+ ((mut) && (mut) != (void *) ~0l \
+ && (((mut)->__data.__kind \
+ & (PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_NORMAL_NP)) \
+ == PTHREAD_MUTEX_PRIO_INHERIT_NP))
+#else
+# define USE_REQUEUE_PI(mut) 0
+#endif
+
+
+/* Returns 0 if POL is a valid scheduling policy. */
+static inline int
+check_sched_policy_attr (int pol)
+{
+ if (pol == SCHED_OTHER || pol == SCHED_FIFO || pol == SCHED_RR)
+ return 0;
+
+ return EINVAL;
+}
+
+/* Returns 0 if PR is within the accepted range of priority values for
+ the scheduling policy POL or EINVAL otherwise. */
+static inline int
+check_sched_priority_attr (int pr, int pol)
+{
+ int min = __sched_get_priority_min (pol);
+ int max = __sched_get_priority_max (pol);
+
+ if (min >= 0 && max >= 0 && pr >= min && pr <= max)
+ return 0;
+
+ return EINVAL;
+}
+
+/* Returns 0 if ST is a valid stack size for a thread stack and EINVAL
+ otherwise. */
+static inline int
+check_stacksize_attr (size_t st)
+{
+ if (st >= PTHREAD_STACK_MIN)
+ return 0;
+
+ return EINVAL;
+}
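
The three validators above return 0 or EINVAL; a minimal sketch of how a caller might chain them before applying scheduling attributes is shown below. check_and_apply_sched is a hypothetical helper name, not part of glibc.

/* Sketch only: validate a policy and priority with the helpers above
   before applying them to an attribute object.  */
static int
check_and_apply_sched (pthread_attr_t *attr, int policy,
                       const struct sched_param *param)
{
  int err = check_sched_policy_attr (policy);
  if (err == 0)
    err = check_sched_priority_attr (param->sched_priority, policy);
  if (err == 0)
    err = pthread_attr_setschedpolicy (attr, policy);
  if (err == 0)
    err = pthread_attr_setschedparam (attr, param);
  return err;                     /* 0 on success, otherwise an errno value */
}
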
+
+#define ASSERT_TYPE_SIZE(type, size) \
+ _Static_assert (sizeof (type) == size, \
+ "sizeof (" #type ") != " #size)
+
+#define ASSERT_PTHREAD_INTERNAL_SIZE(type, internal) \
+ _Static_assert (sizeof ((type) { { 0 } }).__size >= sizeof (internal),\
+ "sizeof (" #type ".__size) < sizeof (" #internal ")")
+
+#define ASSERT_PTHREAD_STRING(x) __STRING (x)
+#define ASSERT_PTHREAD_INTERNAL_OFFSET(type, member, offset) \
+ _Static_assert (offsetof (type, member) == offset, \
+ "offset of " #member " field of " #type " != " \
+ ASSERT_PTHREAD_STRING (offset))
+
+#endif /* pthreadP.h */
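
The ASSERT_* macros above turn ABI expectations into _Static_assert checks. For illustration only, two plausible checks against the glibc size macros __SIZEOF_PTHREAD_MUTEX_T and __SIZEOF_PTHREAD_COND_T might look like the following; the particular checks are an example, not taken from this patch.

/* Illustration only: compile-time ABI checks written with the macros above.  */
ASSERT_TYPE_SIZE (pthread_mutex_t, __SIZEOF_PTHREAD_MUTEX_T);
ASSERT_TYPE_SIZE (pthread_cond_t, __SIZEOF_PTHREAD_COND_T);
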
diff --git a/nptl_2_17/pthread_2_17.h b/nptl_2_17/pthread_2_17.h
new file mode 100644
index 00000000..3cb871a2
--- /dev/null
+++ b/nptl_2_17/pthread_2_17.h
@@ -0,0 +1,1175 @@
+/* Copyright (C) 2002-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _PTHREAD_H
+#define _PTHREAD_H 1
+
+#include "bits/pthreadtypes_2_17.h"
+
+#include <features.h>
+#include <endian.h>
+#include <sched.h>
+#include <time.h>
+
+#include <bits/setjmp.h>
+#include <bits/wordsize.h>
+#include <bits/types/struct_timespec.h>
+
+
+/* Detach state. */
+enum
+{
+ PTHREAD_CREATE_JOINABLE,
+#define PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_JOINABLE
+ PTHREAD_CREATE_DETACHED
+#define PTHREAD_CREATE_DETACHED PTHREAD_CREATE_DETACHED
+};
+
+
+/* Mutex types. */
+enum
+{
+ PTHREAD_MUTEX_TIMED_NP,
+ PTHREAD_MUTEX_RECURSIVE_NP,
+ PTHREAD_MUTEX_ERRORCHECK_NP,
+ PTHREAD_MUTEX_ADAPTIVE_NP
+#if defined __USE_UNIX98 || defined __USE_XOPEN2K8
+ ,
+ PTHREAD_MUTEX_NORMAL = PTHREAD_MUTEX_TIMED_NP,
+ PTHREAD_MUTEX_RECURSIVE = PTHREAD_MUTEX_RECURSIVE_NP,
+ PTHREAD_MUTEX_ERRORCHECK = PTHREAD_MUTEX_ERRORCHECK_NP,
+ PTHREAD_MUTEX_DEFAULT = PTHREAD_MUTEX_NORMAL
+#endif
+#ifdef __USE_GNU
+ /* For compatibility. */
+ , PTHREAD_MUTEX_FAST_NP = PTHREAD_MUTEX_TIMED_NP
+#endif
+};
+
+
+#ifdef __USE_XOPEN2K
+/* Robust mutex or not flags. */
+enum
+{
+ PTHREAD_MUTEX_STALLED,
+ PTHREAD_MUTEX_STALLED_NP = PTHREAD_MUTEX_STALLED,
+ PTHREAD_MUTEX_ROBUST,
+ PTHREAD_MUTEX_ROBUST_NP = PTHREAD_MUTEX_ROBUST
+};
+#endif
+
+
+#if defined __USE_POSIX199506 || defined __USE_UNIX98
+/* Mutex protocols. */
+enum
+{
+ PTHREAD_PRIO_NONE,
+ PTHREAD_PRIO_INHERIT,
+ PTHREAD_PRIO_PROTECT
+};
+#endif
+
+
+#if __PTHREAD_MUTEX_HAVE_PREV
+# define PTHREAD_MUTEX_INITIALIZER \
+ { { 0, 0, 0, 0, 0, __PTHREAD_SPINS, { 0, 0 } } }
+# ifdef __USE_GNU
+# define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP \
+ { { 0, 0, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, __PTHREAD_SPINS, { 0, 0 } } }
+# define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP \
+ { { 0, 0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, __PTHREAD_SPINS, { 0, 0 } } }
+# define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP \
+ { { 0, 0, 0, 0, PTHREAD_MUTEX_ADAPTIVE_NP, __PTHREAD_SPINS, { 0, 0 } } }
+
+# endif
+#else
+# define PTHREAD_MUTEX_INITIALIZER \
+ { { 0, 0, 0, 0, 0, { __PTHREAD_SPINS } } }
+# ifdef __USE_GNU
+# define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP \
+ { { 0, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, 0, { __PTHREAD_SPINS } } }
+# define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP \
+ { { 0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, 0, { __PTHREAD_SPINS } } }
+# define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP \
+ { { 0, 0, 0, PTHREAD_MUTEX_ADAPTIVE_NP, 0, { __PTHREAD_SPINS } } }
+
+# endif
+#endif
+
+
+/* Read-write lock types. */
+#if defined __USE_UNIX98 || defined __USE_XOPEN2K
+enum
+{
+ PTHREAD_RWLOCK_PREFER_READER_NP,
+ PTHREAD_RWLOCK_PREFER_WRITER_NP,
+ PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP,
+ PTHREAD_RWLOCK_DEFAULT_NP = PTHREAD_RWLOCK_PREFER_READER_NP
+};
+
+/* Define __PTHREAD_RWLOCK_INT_FLAGS_SHARED to 1 if pthread_rwlock_t
+ has the shared field. All 64-bit architectures have the shared field
+ in pthread_rwlock_t. */
+#ifndef __PTHREAD_RWLOCK_INT_FLAGS_SHARED
+# if __WORDSIZE == 64
+# define __PTHREAD_RWLOCK_INT_FLAGS_SHARED 1
+# endif
+#endif
+
+/* Read-write lock initializers. */
+# define PTHREAD_RWLOCK_INITIALIZER \
+ { { 0, 0, 0, 0, 0, 0, 0, 0, __PTHREAD_RWLOCK_ELISION_EXTRA, 0, 0 } }
+# ifdef __USE_GNU
+# ifdef __PTHREAD_RWLOCK_INT_FLAGS_SHARED
+# define PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP \
+ { { 0, 0, 0, 0, 0, 0, 0, 0, __PTHREAD_RWLOCK_ELISION_EXTRA, 0, \
+ PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP } }
+# else
+# if __BYTE_ORDER == __LITTLE_ENDIAN
+# define PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP \
+ { { 0, 0, 0, 0, 0, 0, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP, \
+ 0, __PTHREAD_RWLOCK_ELISION_EXTRA, 0, 0 } }
+# else
+# define PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP \
+ { { 0, 0, 0, 0, 0, 0, 0, 0, 0, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP,\
+ 0 } }
+# endif
+# endif
+# endif
+#endif /* Unix98 or XOpen2K */
+
+
+/* Scheduler inheritance. */
+enum
+{
+ PTHREAD_INHERIT_SCHED,
+#define PTHREAD_INHERIT_SCHED PTHREAD_INHERIT_SCHED
+ PTHREAD_EXPLICIT_SCHED
+#define PTHREAD_EXPLICIT_SCHED PTHREAD_EXPLICIT_SCHED
+};
+
+
+/* Scope handling. */
+enum
+{
+ PTHREAD_SCOPE_SYSTEM,
+#define PTHREAD_SCOPE_SYSTEM PTHREAD_SCOPE_SYSTEM
+ PTHREAD_SCOPE_PROCESS
+#define PTHREAD_SCOPE_PROCESS PTHREAD_SCOPE_PROCESS
+};
+
+
+/* Process shared or private flag. */
+enum
+{
+ PTHREAD_PROCESS_PRIVATE,
+#define PTHREAD_PROCESS_PRIVATE PTHREAD_PROCESS_PRIVATE
+ PTHREAD_PROCESS_SHARED
+#define PTHREAD_PROCESS_SHARED PTHREAD_PROCESS_SHARED
+};
+
+
+
+/* Conditional variable handling. */
+#define PTHREAD_COND_INITIALIZER { { 0, 0, 0, 0, 0, (void *) 0, 0, 0 } }
+
+/* Cleanup buffers */
+struct _pthread_cleanup_buffer
+{
+ void (*__routine) (void *); /* Function to call. */
+ void *__arg; /* Its argument. */
+ int __canceltype; /* Saved cancellation type. */
+ struct _pthread_cleanup_buffer *__prev; /* Chaining of cleanup functions. */
+};
+
+/* Cancellation */
+enum
+{
+ PTHREAD_CANCEL_ENABLE,
+#define PTHREAD_CANCEL_ENABLE PTHREAD_CANCEL_ENABLE
+ PTHREAD_CANCEL_DISABLE
+#define PTHREAD_CANCEL_DISABLE PTHREAD_CANCEL_DISABLE
+};
+enum
+{
+ PTHREAD_CANCEL_DEFERRED,
+#define PTHREAD_CANCEL_DEFERRED PTHREAD_CANCEL_DEFERRED
+ PTHREAD_CANCEL_ASYNCHRONOUS
+#define PTHREAD_CANCEL_ASYNCHRONOUS PTHREAD_CANCEL_ASYNCHRONOUS
+};
+#define PTHREAD_CANCELED ((void *) -1)
+
+
+/* Single execution handling. */
+#define PTHREAD_ONCE_INIT 0
+
+
+#ifdef __USE_XOPEN2K
+/* Value returned by 'pthread_barrier_wait' for one of the threads after
+ the required number of threads have called this function.
+ -1 is distinct from 0 and all errno constants. */
+# define PTHREAD_BARRIER_SERIAL_THREAD -1
+#endif
+
+
+__BEGIN_DECLS
+
+/* Create a new thread, starting with execution of START-ROUTINE
+ getting passed ARG. Creation attributes come from ATTR. The new
+ handle is stored in *NEWTHREAD. */
+extern int pthread_create (pthread_t *__restrict __newthread,
+ const pthread_attr_t *__restrict __attr,
+ void *(*__start_routine) (void *),
+ void *__restrict __arg) __THROWNL __nonnull ((1, 3));
+
+/* Terminate calling thread.
+
+ The registered cleanup handlers are called via exception handling
+ so we cannot mark this function with __THROW. */
+extern void pthread_exit (void *__retval) __attribute__ ((__noreturn__));
+
+/* Make calling thread wait for termination of the thread TH. The
+ exit status of the thread is stored in *THREAD_RETURN, if THREAD_RETURN
+ is not NULL.
+
+ This function is a cancellation point and therefore not marked with
+ __THROW. */
+extern int pthread_join (pthread_t __th, void **__thread_return);
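
A minimal create/join sketch for the two declarations above; start and spawn_and_wait are invented names, and the start routine's return value is retrieved through pthread_join.

#include <pthread.h>

static void *
start (void *arg)                     /* hypothetical start routine */
{
  return arg;                         /* value later retrieved by pthread_join */
}

static int
spawn_and_wait (void)
{
  pthread_t th;
  void *result;
  int err = pthread_create (&th, NULL, start, NULL);
  if (err != 0)
    return err;                       /* pthread functions return errno values */
  return pthread_join (th, &result);
}
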
+
+#ifdef __USE_GNU
+/* Check whether thread TH has terminated. If yes return the status of
+ the thread in *THREAD_RETURN, if THREAD_RETURN is not NULL. */
+extern int pthread_tryjoin_np (pthread_t __th, void **__thread_return) __THROW;
+
+/* Make calling thread wait for termination of the thread TH, but only
+ until TIMEOUT. The exit status of the thread is stored in
+ *THREAD_RETURN, if THREAD_RETURN is not NULL.
+
+ This function is a cancellation point and therefore not marked with
+ __THROW. */
+extern int pthread_timedjoin_np (pthread_t __th, void **__thread_return,
+ const struct timespec *__abstime);
+#endif
+
+/* Indicate that the thread TH is never to be joined with PTHREAD_JOIN.
+ The resources of TH will therefore be freed immediately when it
+ terminates, instead of waiting for another thread to perform PTHREAD_JOIN
+ on it. */
+extern int pthread_detach (pthread_t __th) __THROW;
+
+
+/* Obtain the identifier of the current thread. */
+extern pthread_t pthread_self (void) __THROW __attribute__ ((__const__));
+
+/* Compare two thread identifiers. */
+extern int pthread_equal (pthread_t __thread1, pthread_t __thread2)
+ __THROW __attribute__ ((__const__));
+
+
+/* Thread attribute handling. */
+
+/* Initialize thread attribute *ATTR with default attributes
+ (detachstate is PTHREAD_CREATE_JOINABLE, scheduling policy is SCHED_OTHER,
+ no user-provided stack). */
+extern int pthread_attr_init (pthread_attr_t *__attr) __THROW __nonnull ((1));
+
+/* Destroy thread attribute *ATTR. */
+extern int pthread_attr_destroy (pthread_attr_t *__attr)
+ __THROW __nonnull ((1));
+
+/* Get detach state attribute. */
+extern int pthread_attr_getdetachstate (const pthread_attr_t *__attr,
+ int *__detachstate)
+ __THROW __nonnull ((1, 2));
+
+/* Set detach state attribute. */
+extern int pthread_attr_setdetachstate (pthread_attr_t *__attr,
+ int __detachstate)
+ __THROW __nonnull ((1));
+
+
+/* Get the size of the guard area created for stack overflow protection. */
+extern int pthread_attr_getguardsize (const pthread_attr_t *__attr,
+ size_t *__guardsize)
+ __THROW __nonnull ((1, 2));
+
+/* Set the size of the guard area created for stack overflow protection. */
+extern int pthread_attr_setguardsize (pthread_attr_t *__attr,
+ size_t __guardsize)
+ __THROW __nonnull ((1));
+
+
+/* Return in *PARAM the scheduling parameters of *ATTR. */
+extern int pthread_attr_getschedparam (const pthread_attr_t *__restrict __attr,
+ struct sched_param *__restrict __param)
+ __THROW __nonnull ((1, 2));
+
+/* Set scheduling parameters (priority, etc) in *ATTR according to PARAM. */
+extern int pthread_attr_setschedparam (pthread_attr_t *__restrict __attr,
+ const struct sched_param *__restrict
+ __param) __THROW __nonnull ((1, 2));
+
+/* Return in *POLICY the scheduling policy of *ATTR. */
+extern int pthread_attr_getschedpolicy (const pthread_attr_t *__restrict
+ __attr, int *__restrict __policy)
+ __THROW __nonnull ((1, 2));
+
+/* Set scheduling policy in *ATTR according to POLICY. */
+extern int pthread_attr_setschedpolicy (pthread_attr_t *__attr, int __policy)
+ __THROW __nonnull ((1));
+
+/* Return in *INHERIT the scheduling inheritance mode of *ATTR. */
+extern int pthread_attr_getinheritsched (const pthread_attr_t *__restrict
+ __attr, int *__restrict __inherit)
+ __THROW __nonnull ((1, 2));
+
+/* Set scheduling inheritance mode in *ATTR according to INHERIT. */
+extern int pthread_attr_setinheritsched (pthread_attr_t *__attr,
+ int __inherit)
+ __THROW __nonnull ((1));
+
+
+/* Return in *SCOPE the scheduling contention scope of *ATTR. */
+extern int pthread_attr_getscope (const pthread_attr_t *__restrict __attr,
+ int *__restrict __scope)
+ __THROW __nonnull ((1, 2));
+
+/* Set scheduling contention scope in *ATTR according to SCOPE. */
+extern int pthread_attr_setscope (pthread_attr_t *__attr, int __scope)
+ __THROW __nonnull ((1));
+
+/* Return the previously set address for the stack. */
+extern int pthread_attr_getstackaddr (const pthread_attr_t *__restrict
+ __attr, void **__restrict __stackaddr)
+ __THROW __nonnull ((1, 2)) __attribute_deprecated__;
+
+/* Set the starting address of the stack of the thread to be created.
+ Depending on whether the stack grows up or down the value must either
+ be higher or lower than all the addresses in the memory block. The
+ minimal size of the block must be PTHREAD_STACK_MIN. */
+extern int pthread_attr_setstackaddr (pthread_attr_t *__attr,
+ void *__stackaddr)
+ __THROW __nonnull ((1)) __attribute_deprecated__;
+
+/* Return the currently used minimal stack size. */
+extern int pthread_attr_getstacksize (const pthread_attr_t *__restrict
+ __attr, size_t *__restrict __stacksize)
+ __THROW __nonnull ((1, 2));
+
+/* Add information about the minimum stack size needed for the thread
+ to be started. This size must never be less than PTHREAD_STACK_MIN
+ and must also not exceed the system limits. */
+extern int pthread_attr_setstacksize (pthread_attr_t *__attr,
+ size_t __stacksize)
+ __THROW __nonnull ((1));
+
+#ifdef __USE_XOPEN2K
+/* Return the previously set address for the stack. */
+extern int pthread_attr_getstack (const pthread_attr_t *__restrict __attr,
+ void **__restrict __stackaddr,
+ size_t *__restrict __stacksize)
+ __THROW __nonnull ((1, 2, 3));
+
+/* The following two interfaces are intended to replace the last two. They
+ require setting the address as well as the size since only setting the
+ address will make the implementation on some architectures impossible. */
+extern int pthread_attr_setstack (pthread_attr_t *__attr, void *__stackaddr,
+ size_t __stacksize) __THROW __nonnull ((1));
+#endif
+
+#ifdef __USE_GNU
+/* Thread created with attribute ATTR will be limited to run only on
+ the processors represented in CPUSET. */
+extern int pthread_attr_setaffinity_np (pthread_attr_t *__attr,
+ size_t __cpusetsize,
+ const cpu_set_t *__cpuset)
+ __THROW __nonnull ((1, 3));
+
+/* Get bit set in CPUSET representing the processors threads created with
+ ATTR can run on. */
+extern int pthread_attr_getaffinity_np (const pthread_attr_t *__attr,
+ size_t __cpusetsize,
+ cpu_set_t *__cpuset)
+ __THROW __nonnull ((1, 3));
+
+/* Get the default attributes used by pthread_create in this process. */
+extern int pthread_getattr_default_np (pthread_attr_t *__attr)
+ __THROW __nonnull ((1));
+
+/* Set the default attributes to be used by pthread_create in this
+ process. */
+extern int pthread_setattr_default_np (const pthread_attr_t *__attr)
+ __THROW __nonnull ((1));
+
+/* Initialize thread attribute *ATTR with attributes corresponding to the
+ already running thread TH. It shall be called on an uninitialized ATTR,
+ which shall be destroyed with pthread_attr_destroy when no longer needed. */
+extern int pthread_getattr_np (pthread_t __th, pthread_attr_t *__attr)
+ __THROW __nonnull ((2));
+#endif
+
+
+/* Functions for scheduling control. */
+
+/* Set the scheduling parameters for TARGET_THREAD according to POLICY
+ and *PARAM. */
+extern int pthread_setschedparam (pthread_t __target_thread, int __policy,
+ const struct sched_param *__param)
+ __THROW __nonnull ((3));
+
+/* Return in *POLICY and *PARAM the scheduling parameters for TARGET_THREAD. */
+extern int pthread_getschedparam (pthread_t __target_thread,
+ int *__restrict __policy,
+ struct sched_param *__restrict __param)
+ __THROW __nonnull ((2, 3));
+
+/* Set the scheduling priority for TARGET_THREAD. */
+extern int pthread_setschedprio (pthread_t __target_thread, int __prio)
+ __THROW;
+
+
+#ifdef __USE_GNU
+/* Get thread name visible in the kernel and its interfaces. */
+extern int pthread_getname_np (pthread_t __target_thread, char *__buf,
+ size_t __buflen)
+ __THROW __nonnull ((2));
+
+/* Set thread name visible in the kernel and its interfaces. */
+extern int pthread_setname_np (pthread_t __target_thread, const char *__name)
+ __THROW __nonnull ((2));
+#endif
+
+
+#ifdef __USE_UNIX98
+/* Determine level of concurrency. */
+extern int pthread_getconcurrency (void) __THROW;
+
+/* Set new concurrency level to LEVEL. */
+extern int pthread_setconcurrency (int __level) __THROW;
+#endif
+
+#ifdef __USE_GNU
+/* Yield the processor to another thread or process.
+ This function is similar to the POSIX `sched_yield' function but
+ might be implemented differently in the case of an m-on-n thread
+ implementation. */
+extern int pthread_yield (void) __THROW;
+
+
+/* Limit specified thread TH to run only on the processors represented
+ in CPUSET. */
+extern int pthread_setaffinity_np (pthread_t __th, size_t __cpusetsize,
+ const cpu_set_t *__cpuset)
+ __THROW __nonnull ((3));
+
+/* Get bit set in CPUSET representing the processors TH can run on. */
+extern int pthread_getaffinity_np (pthread_t __th, size_t __cpusetsize,
+ cpu_set_t *__cpuset)
+ __THROW __nonnull ((3));
+#endif
+
+
+/* Functions for handling initialization. */
+
+/* Guarantee that the initialization function INIT_ROUTINE will be called
+ only once, even if pthread_once is executed several times with the
+ same ONCE_CONTROL argument. ONCE_CONTROL must point to a static or
+ extern variable initialized to PTHREAD_ONCE_INIT.
+
+ The initialization routine might throw an exception, which is why
+ this function is not marked with __THROW. */
+extern int pthread_once (pthread_once_t *__once_control,
+ void (*__init_routine) (void)) __nonnull ((1, 2));
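
A minimal usage sketch of pthread_once with PTHREAD_ONCE_INIT as documented above; init_resources and ensure_init are invented names.

#include <pthread.h>

static pthread_once_t once = PTHREAD_ONCE_INIT;

static void
init_resources (void)            /* hypothetical one-time initializer */
{
  /* Runs exactly once, no matter how many threads call ensure_init.  */
}

static void
ensure_init (void)
{
  pthread_once (&once, init_resources);
}
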
+
+
+/* Functions for handling cancellation.
+
+ Note that these functions are deliberately not marked as non-throwing in
+ C++ code. If cancellation is implemented by unwinding, this is necessary
+ so that the compiler generates the unwind information. */
+
+/* Set cancelability state of current thread to STATE, returning old
+ state in *OLDSTATE if OLDSTATE is not NULL. */
+extern int pthread_setcancelstate (int __state, int *__oldstate);
+
+/* Set cancellation state of current thread to TYPE, returning the old
+ type in *OLDTYPE if OLDTYPE is not NULL. */
+extern int pthread_setcanceltype (int __type, int *__oldtype);
+
+/* Cancel THREAD immediately or at the next possibility. */
+extern int pthread_cancel (pthread_t __th);
+
+/* Test for pending cancellation for the current thread and terminate
+ the thread as per pthread_exit(PTHREAD_CANCELED) if it has been
+ cancelled. */
+extern void pthread_testcancel (void);
+
+
+/* Cancellation handling with integration into exception handling. */
+
+typedef struct
+{
+ struct
+ {
+ __jmp_buf __cancel_jmp_buf;
+ int __mask_was_saved;
+ } __cancel_jmp_buf[1];
+ void *__pad[4];
+} __pthread_unwind_buf_t __attribute__ ((__aligned__));
+
+/* No special attributes by default. */
+#ifndef __cleanup_fct_attribute
+# define __cleanup_fct_attribute
+#endif
+
+
+/* Structure to hold the cleanup handler information. */
+struct __pthread_cleanup_frame
+{
+ void (*__cancel_routine) (void *);
+ void *__cancel_arg;
+ int __do_it;
+ int __cancel_type;
+};
+
+#if defined __GNUC__ && defined __EXCEPTIONS
+# ifdef __cplusplus
+/* Class to handle cancellation handler invocation. */
+class __pthread_cleanup_class
+{
+ void (*__cancel_routine) (void *);
+ void *__cancel_arg;
+ int __do_it;
+ int __cancel_type;
+
+ public:
+ __pthread_cleanup_class (void (*__fct) (void *), void *__arg)
+ : __cancel_routine (__fct), __cancel_arg (__arg), __do_it (1) { }
+ ~__pthread_cleanup_class () { if (__do_it) __cancel_routine (__cancel_arg); }
+ void __setdoit (int __newval) { __do_it = __newval; }
+ void __defer () { pthread_setcanceltype (PTHREAD_CANCEL_DEFERRED,
+ &__cancel_type); }
+ void __restore () const { pthread_setcanceltype (__cancel_type, 0); }
+};
+
+/* Install a cleanup handler: ROUTINE will be called with arguments ARG
+ when the thread is canceled or calls pthread_exit. ROUTINE will also
+ be called with arguments ARG when the matching pthread_cleanup_pop
+ is executed with non-zero EXECUTE argument.
+
+ pthread_cleanup_push and pthread_cleanup_pop are macros and must always
+ be used in matching pairs at the same nesting level of braces. */
+# define pthread_cleanup_push(routine, arg) \
+ do { \
+ __pthread_cleanup_class __clframe (routine, arg)
+
+/* Remove a cleanup handler installed by the matching pthread_cleanup_push.
+ If EXECUTE is non-zero, the handler function is called. */
+# define pthread_cleanup_pop(execute) \
+ __clframe.__setdoit (execute); \
+ } while (0)
+
+# ifdef __USE_GNU
+/* Install a cleanup handler as pthread_cleanup_push does, but also
+ saves the current cancellation type and sets it to deferred
+ cancellation. */
+# define pthread_cleanup_push_defer_np(routine, arg) \
+ do { \
+ __pthread_cleanup_class __clframe (routine, arg); \
+ __clframe.__defer ()
+
+/* Remove a cleanup handler as pthread_cleanup_pop does, but also
+ restores the cancellation type that was in effect when the matching
+ pthread_cleanup_push_defer was called. */
+# define pthread_cleanup_pop_restore_np(execute) \
+ __clframe.__restore (); \
+ __clframe.__setdoit (execute); \
+ } while (0)
+# endif
+# else
+/* Function called to call the cleanup handler. As an extern inline
+ function the compiler is free to inline it when needed or to fall
+ back on the out-of-line copy which must exist somewhere else. */
+__extern_inline void
+__pthread_cleanup_routine (struct __pthread_cleanup_frame *__frame)
+{
+ if (__frame->__do_it)
+ __frame->__cancel_routine (__frame->__cancel_arg);
+}
+
+/* Install a cleanup handler: ROUTINE will be called with arguments ARG
+ when the thread is canceled or calls pthread_exit. ROUTINE will also
+ be called with arguments ARG when the matching pthread_cleanup_pop
+ is executed with non-zero EXECUTE argument.
+
+ pthread_cleanup_push and pthread_cleanup_pop are macros and must always
+ be used in matching pairs at the same nesting level of braces. */
+# define pthread_cleanup_push(routine, arg) \
+ do { \
+ struct __pthread_cleanup_frame __clframe \
+ __attribute__ ((__cleanup__ (__pthread_cleanup_routine))) \
+ = { .__cancel_routine = (routine), .__cancel_arg = (arg), \
+ .__do_it = 1 };
+
+/* Remove a cleanup handler installed by the matching pthread_cleanup_push.
+ If EXECUTE is non-zero, the handler function is called. */
+# define pthread_cleanup_pop(execute) \
+ __clframe.__do_it = (execute); \
+ } while (0)
+
+# ifdef __USE_GNU
+/* Install a cleanup handler as pthread_cleanup_push does, but also
+ saves the current cancellation type and sets it to deferred
+ cancellation. */
+# define pthread_cleanup_push_defer_np(routine, arg) \
+ do { \
+ struct __pthread_cleanup_frame __clframe \
+ __attribute__ ((__cleanup__ (__pthread_cleanup_routine))) \
+ = { .__cancel_routine = (routine), .__cancel_arg = (arg), \
+ .__do_it = 1 }; \
+ (void) pthread_setcanceltype (PTHREAD_CANCEL_DEFERRED, \
+ &__clframe.__cancel_type)
+
+/* Remove a cleanup handler as pthread_cleanup_pop does, but also
+ restores the cancellation type that was in effect when the matching
+ pthread_cleanup_push_defer was called. */
+# define pthread_cleanup_pop_restore_np(execute) \
+ (void) pthread_setcanceltype (__clframe.__cancel_type, NULL); \
+ __clframe.__do_it = (execute); \
+ } while (0)
+# endif
+# endif
+#else
+/* Install a cleanup handler: ROUTINE will be called with arguments ARG
+ when the thread is canceled or calls pthread_exit. ROUTINE will also
+ be called with arguments ARG when the matching pthread_cleanup_pop
+ is executed with non-zero EXECUTE argument.
+
+ pthread_cleanup_push and pthread_cleanup_pop are macros and must always
+ be used in matching pairs at the same nesting level of braces. */
+# define pthread_cleanup_push(routine, arg) \
+ do { \
+ __pthread_unwind_buf_t __cancel_buf; \
+ void (*__cancel_routine) (void *) = (routine); \
+ void *__cancel_arg = (arg); \
+ int __not_first_call = __sigsetjmp ((struct __jmp_buf_tag *) (void *) \
+ __cancel_buf.__cancel_jmp_buf, 0); \
+ if (__glibc_unlikely (__not_first_call)) \
+ { \
+ __cancel_routine (__cancel_arg); \
+ __pthread_unwind_next (&__cancel_buf); \
+ /* NOTREACHED */ \
+ } \
+ \
+ __pthread_register_cancel (&__cancel_buf); \
+ do {
+extern void __pthread_register_cancel (__pthread_unwind_buf_t *__buf)
+ __cleanup_fct_attribute;
+
+/* Remove a cleanup handler installed by the matching pthread_cleanup_push.
+ If EXECUTE is non-zero, the handler function is called. */
+# define pthread_cleanup_pop(execute) \
+ do { } while (0);/* Empty to allow label before pthread_cleanup_pop. */\
+ } while (0); \
+ __pthread_unregister_cancel (&__cancel_buf); \
+ if (execute) \
+ __cancel_routine (__cancel_arg); \
+ } while (0)
+extern void __pthread_unregister_cancel (__pthread_unwind_buf_t *__buf)
+ __cleanup_fct_attribute;
+
+# ifdef __USE_GNU
+/* Install a cleanup handler as pthread_cleanup_push does, but also
+ saves the current cancellation type and sets it to deferred
+ cancellation. */
+# define pthread_cleanup_push_defer_np(routine, arg) \
+ do { \
+ __pthread_unwind_buf_t __cancel_buf; \
+ void (*__cancel_routine) (void *) = (routine); \
+ void *__cancel_arg = (arg); \
+ int __not_first_call = __sigsetjmp ((struct __jmp_buf_tag *) (void *) \
+ __cancel_buf.__cancel_jmp_buf, 0); \
+ if (__glibc_unlikely (__not_first_call)) \
+ { \
+ __cancel_routine (__cancel_arg); \
+ __pthread_unwind_next (&__cancel_buf); \
+ /* NOTREACHED */ \
+ } \
+ \
+ __pthread_register_cancel_defer (&__cancel_buf); \
+ do {
+extern void __pthread_register_cancel_defer (__pthread_unwind_buf_t *__buf)
+ __cleanup_fct_attribute;
+
+/* Remove a cleanup handler as pthread_cleanup_pop does, but also
+ restores the cancellation type that was in effect when the matching
+ pthread_cleanup_push_defer was called. */
+# define pthread_cleanup_pop_restore_np(execute) \
+ do { } while (0);/* Empty to allow label before pthread_cleanup_pop. */\
+ } while (0); \
+ __pthread_unregister_cancel_restore (&__cancel_buf); \
+ if (execute) \
+ __cancel_routine (__cancel_arg); \
+ } while (0)
+extern void __pthread_unregister_cancel_restore (__pthread_unwind_buf_t *__buf)
+ __cleanup_fct_attribute;
+# endif
+
+/* Internal interface to initiate cleanup. */
+extern void __pthread_unwind_next (__pthread_unwind_buf_t *__buf)
+ __cleanup_fct_attribute __attribute__ ((__noreturn__))
+# ifndef SHARED
+ __attribute__ ((__weak__))
+# endif
+ ;
+#endif
+
+/* Function used in the macros. */
+struct __jmp_buf_tag;
+extern int __sigsetjmp (struct __jmp_buf_tag *__env, int __savemask) __THROWNL;
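
Since pthread_cleanup_push and pthread_cleanup_pop expand to an opening and a closing brace, they must be paired in the same block; the following application-level sketch (free_buffer and worker are invented names) releases a buffer whether the thread is cancelled or the pop runs the handler itself.

#include <pthread.h>
#include <stdlib.h>

static void
free_buffer (void *arg)            /* handler: runs on cancellation or pop (1) */
{
  free (arg);
}

static void *
worker (void *unused)              /* hypothetical thread start routine */
{
  char *buf;
  (void) unused;
  buf = malloc (4096);
  if (buf == NULL)
    return NULL;

  pthread_cleanup_push (free_buffer, buf);
  /* ... code containing cancellation points ... */
  pthread_cleanup_pop (1);         /* non-zero: run free_buffer now */
  return NULL;
}
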
+
+
+/* Mutex handling. */
+
+/* Initialize a mutex. */
+extern int pthread_mutex_init (pthread_mutex_t *__mutex,
+ const pthread_mutexattr_t *__mutexattr)
+ __THROW __nonnull ((1));
+
+/* Destroy a mutex. */
+extern int pthread_mutex_destroy (pthread_mutex_t *__mutex)
+ __THROW __nonnull ((1));
+
+/* Try locking a mutex. */
+extern int pthread_mutex_trylock (pthread_mutex_t *__mutex)
+ __THROWNL __nonnull ((1));
+
+/* Lock a mutex. */
+extern int pthread_mutex_lock (pthread_mutex_t *__mutex)
+ __THROWNL __nonnull ((1));
+
+#ifdef __USE_XOPEN2K
+/* Wait until lock becomes available, or specified time passes. */
+extern int pthread_mutex_timedlock (pthread_mutex_t *__restrict __mutex,
+ const struct timespec *__restrict
+ __abstime) __THROWNL __nonnull ((1, 2));
+#endif
+
+/* Unlock a mutex. */
+extern int pthread_mutex_unlock (pthread_mutex_t *__mutex)
+ __THROWNL __nonnull ((1));
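
A minimal sketch of the mutex calls declared above, using the static initializer defined earlier in this header; bump and counter are invented names.

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static long counter;

static void
bump (void)
{
  pthread_mutex_lock (&lock);      /* enter the critical section */
  ++counter;
  pthread_mutex_unlock (&lock);
}
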
+
+
+/* Get the priority ceiling of MUTEX. */
+extern int pthread_mutex_getprioceiling (const pthread_mutex_t *
+ __restrict __mutex,
+ int *__restrict __prioceiling)
+ __THROW __nonnull ((1, 2));
+
+/* Set the priority ceiling of MUTEX to PRIOCEILING, return old
+ priority ceiling value in *OLD_CEILING. */
+extern int pthread_mutex_setprioceiling (pthread_mutex_t *__restrict __mutex,
+ int __prioceiling,
+ int *__restrict __old_ceiling)
+ __THROW __nonnull ((1, 3));
+
+
+#ifdef __USE_XOPEN2K8
+/* Declare the state protected by MUTEX as consistent. */
+extern int pthread_mutex_consistent (pthread_mutex_t *__mutex)
+ __THROW __nonnull ((1));
+# ifdef __USE_GNU
+extern int pthread_mutex_consistent_np (pthread_mutex_t *__mutex)
+ __THROW __nonnull ((1));
+# endif
+#endif
+
+
+/* Functions for handling mutex attributes. */
+
+/* Initialize mutex attribute object ATTR with default attributes
+ (kind is PTHREAD_MUTEX_TIMED_NP). */
+extern int pthread_mutexattr_init (pthread_mutexattr_t *__attr)
+ __THROW __nonnull ((1));
+
+/* Destroy mutex attribute object ATTR. */
+extern int pthread_mutexattr_destroy (pthread_mutexattr_t *__attr)
+ __THROW __nonnull ((1));
+
+/* Get the process-shared flag of the mutex attribute ATTR. */
+extern int pthread_mutexattr_getpshared (const pthread_mutexattr_t *
+ __restrict __attr,
+ int *__restrict __pshared)
+ __THROW __nonnull ((1, 2));
+
+/* Set the process-shared flag of the mutex attribute ATTR. */
+extern int pthread_mutexattr_setpshared (pthread_mutexattr_t *__attr,
+ int __pshared)
+ __THROW __nonnull ((1));
+
+#if defined __USE_UNIX98 || defined __USE_XOPEN2K8
+/* Return in *KIND the mutex kind attribute in *ATTR. */
+extern int pthread_mutexattr_gettype (const pthread_mutexattr_t *__restrict
+ __attr, int *__restrict __kind)
+ __THROW __nonnull ((1, 2));
+
+/* Set the mutex kind attribute in *ATTR to KIND (either PTHREAD_MUTEX_NORMAL,
+ PTHREAD_MUTEX_RECURSIVE, PTHREAD_MUTEX_ERRORCHECK, or
+ PTHREAD_MUTEX_DEFAULT). */
+extern int pthread_mutexattr_settype (pthread_mutexattr_t *__attr, int __kind)
+ __THROW __nonnull ((1));
+#endif
+
+/* Return in *PROTOCOL the mutex protocol attribute in *ATTR. */
+extern int pthread_mutexattr_getprotocol (const pthread_mutexattr_t *
+ __restrict __attr,
+ int *__restrict __protocol)
+ __THROW __nonnull ((1, 2));
+
+/* Set the mutex protocol attribute in *ATTR to PROTOCOL (either
+ PTHREAD_PRIO_NONE, PTHREAD_PRIO_INHERIT, or PTHREAD_PRIO_PROTECT). */
+extern int pthread_mutexattr_setprotocol (pthread_mutexattr_t *__attr,
+ int __protocol)
+ __THROW __nonnull ((1));
+
+/* Return in *PRIOCEILING the mutex prioceiling attribute in *ATTR. */
+extern int pthread_mutexattr_getprioceiling (const pthread_mutexattr_t *
+ __restrict __attr,
+ int *__restrict __prioceiling)
+ __THROW __nonnull ((1, 2));
+
+/* Set the mutex prioceiling attribute in *ATTR to PRIOCEILING. */
+extern int pthread_mutexattr_setprioceiling (pthread_mutexattr_t *__attr,
+ int __prioceiling)
+ __THROW __nonnull ((1));
+
+#ifdef __USE_XOPEN2K
+/* Get the robustness flag of the mutex attribute ATTR. */
+extern int pthread_mutexattr_getrobust (const pthread_mutexattr_t *__attr,
+ int *__robustness)
+ __THROW __nonnull ((1, 2));
+# ifdef __USE_GNU
+extern int pthread_mutexattr_getrobust_np (const pthread_mutexattr_t *__attr,
+ int *__robustness)
+ __THROW __nonnull ((1, 2));
+# endif
+
+/* Set the robustness flag of the mutex attribute ATTR. */
+extern int pthread_mutexattr_setrobust (pthread_mutexattr_t *__attr,
+ int __robustness)
+ __THROW __nonnull ((1));
+# ifdef __USE_GNU
+extern int pthread_mutexattr_setrobust_np (pthread_mutexattr_t *__attr,
+ int __robustness)
+ __THROW __nonnull ((1));
+# endif
+#endif
+
+
+#if defined __USE_UNIX98 || defined __USE_XOPEN2K
+/* Functions for handling read-write locks. */
+
+/* Initialize read-write lock RWLOCK using attributes ATTR, or use
+ the default values if the latter is NULL. */
+extern int pthread_rwlock_init (pthread_rwlock_t *__restrict __rwlock,
+ const pthread_rwlockattr_t *__restrict
+ __attr) __THROW __nonnull ((1));
+
+/* Destroy read-write lock RWLOCK. */
+extern int pthread_rwlock_destroy (pthread_rwlock_t *__rwlock)
+ __THROW __nonnull ((1));
+
+/* Acquire read lock for RWLOCK. */
+extern int pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock)
+ __THROWNL __nonnull ((1));
+
+/* Try to acquire read lock for RWLOCK. */
+extern int pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock)
+ __THROWNL __nonnull ((1));
+
+# ifdef __USE_XOPEN2K
+/* Try to acquire read lock for RWLOCK or return after the specified time. */
+extern int pthread_rwlock_timedrdlock (pthread_rwlock_t *__restrict __rwlock,
+ const struct timespec *__restrict
+ __abstime) __THROWNL __nonnull ((1, 2));
+# endif
+
+/* Acquire write lock for RWLOCK. */
+extern int pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock)
+ __THROWNL __nonnull ((1));
+
+/* Try to acquire write lock for RWLOCK. */
+extern int pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock)
+ __THROWNL __nonnull ((1));
+
+# ifdef __USE_XOPEN2K
+/* Try to acquire write lock for RWLOCK or return after the specified time. */
+extern int pthread_rwlock_timedwrlock (pthread_rwlock_t *__restrict __rwlock,
+ const struct timespec *__restrict
+ __abstime) __THROWNL __nonnull ((1, 2));
+# endif
+
+/* Unlock RWLOCK. */
+extern int pthread_rwlock_unlock (pthread_rwlock_t *__rwlock)
+ __THROWNL __nonnull ((1));
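
A minimal sketch of the read-write lock calls declared above, using PTHREAD_RWLOCK_INITIALIZER from this header; table, read_entry and write_entry are invented names.

#include <pthread.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static int table[64];

static int
read_entry (int i)
{
  int v;
  pthread_rwlock_rdlock (&table_lock);   /* many readers may hold this */
  v = table[i];
  pthread_rwlock_unlock (&table_lock);
  return v;
}

static void
write_entry (int i, int v)
{
  pthread_rwlock_wrlock (&table_lock);   /* writers are exclusive */
  table[i] = v;
  pthread_rwlock_unlock (&table_lock);
}
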
+
+
+/* Functions for handling read-write lock attributes. */
+
+/* Initialize attribute object ATTR with default values. */
+extern int pthread_rwlockattr_init (pthread_rwlockattr_t *__attr)
+ __THROW __nonnull ((1));
+
+/* Destroy attribute object ATTR. */
+extern int pthread_rwlockattr_destroy (pthread_rwlockattr_t *__attr)
+ __THROW __nonnull ((1));
+
+/* Return current setting of process-shared attribute of ATTR in PSHARED. */
+extern int pthread_rwlockattr_getpshared (const pthread_rwlockattr_t *
+ __restrict __attr,
+ int *__restrict __pshared)
+ __THROW __nonnull ((1, 2));
+
+/* Set process-shared attribute of ATTR to PSHARED. */
+extern int pthread_rwlockattr_setpshared (pthread_rwlockattr_t *__attr,
+ int __pshared)
+ __THROW __nonnull ((1));
+
+/* Return current setting of reader/writer preference. */
+extern int pthread_rwlockattr_getkind_np (const pthread_rwlockattr_t *
+ __restrict __attr,
+ int *__restrict __pref)
+ __THROW __nonnull ((1, 2));
+
+/* Set reader/write preference. */
+extern int pthread_rwlockattr_setkind_np (pthread_rwlockattr_t *__attr,
+ int __pref) __THROW __nonnull ((1));
+#endif
+
+
+/* Functions for handling conditional variables. */
+
+/* Initialize condition variable COND using attributes ATTR, or use
+ the default values if the latter is NULL. */
+extern int pthread_cond_init (pthread_cond_t *__restrict __cond,
+ const pthread_condattr_t *__restrict __cond_attr)
+ __THROW __nonnull ((1));
+
+/* Destroy condition variable COND. */
+extern int pthread_cond_destroy (pthread_cond_t *__cond)
+ __THROW __nonnull ((1));
+
+/* Wake up one thread waiting for condition variable COND. */
+extern int pthread_cond_signal (pthread_cond_t *__cond)
+ __THROWNL __nonnull ((1));
+
+/* Wake up all threads waiting for condition variable COND. */
+extern int pthread_cond_broadcast (pthread_cond_t *__cond)
+ __THROWNL __nonnull ((1));
+
+/* Wait for condition variable COND to be signaled or broadcast.
+ MUTEX is assumed to be locked before.
+
+ This function is a cancellation point and therefore not marked with
+ __THROW. */
+extern int pthread_cond_wait (pthread_cond_t *__restrict __cond,
+ pthread_mutex_t *__restrict __mutex)
+ __nonnull ((1, 2));
+
+/* Wait for condition variable COND to be signaled or broadcast until
+ ABSTIME. MUTEX is assumed to be locked before. ABSTIME is an
+ absolute time specification; zero is the beginning of the epoch
+ (00:00:00 GMT, January 1, 1970).
+
+ This function is a cancellation point and therefore not marked with
+ __THROW. */
+extern int pthread_cond_timedwait (pthread_cond_t *__restrict __cond,
+ pthread_mutex_t *__restrict __mutex,
+ const struct timespec *__restrict __abstime)
+ __nonnull ((1, 2, 3));
+
+/* Wait for condition variable COND to be signaled or broadcast until
+ ABSTIME measured by the specified clock. MUTEX is assumed to be
+ locked before. CLOCK is the clock to use. ABSTIME is an absolute
+ time specification against CLOCK's epoch.
+
+ This function is a cancellation point and therefore not marked with
+ __THROW. */
+extern int pthread_cond_clockwait (pthread_cond_t *__restrict __cond,
+ pthread_mutex_t *__restrict __mutex,
+ __clockid_t __clock_id,
+ const struct timespec *__restrict __abstime)
+ __nonnull ((1, 2, 4));
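
pthread_cond_wait releases MUTEX while blocking and re-acquires it before returning, so the guarded predicate must be re-checked in a loop; a conventional sketch follows (queue_ready, consumer_wait and producer_signal are invented names).

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int queue_ready;            /* the predicate protected by lock */

static void
consumer_wait (void)
{
  pthread_mutex_lock (&lock);
  while (!queue_ready)             /* re-check: wakeups may be spurious */
    pthread_cond_wait (&cond, &lock);
  /* ... consume under the lock ... */
  pthread_mutex_unlock (&lock);
}

static void
producer_signal (void)
{
  pthread_mutex_lock (&lock);
  queue_ready = 1;
  pthread_cond_signal (&cond);
  pthread_mutex_unlock (&lock);
}
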
+
+/* Functions for handling condition variable attributes. */
+
+/* Initialize condition variable attribute ATTR. */
+extern int pthread_condattr_init (pthread_condattr_t *__attr)
+ __THROW __nonnull ((1));
+
+/* Destroy condition variable attribute ATTR. */
+extern int pthread_condattr_destroy (pthread_condattr_t *__attr)
+ __THROW __nonnull ((1));
+
+/* Get the process-shared flag of the condition variable attribute ATTR. */
+extern int pthread_condattr_getpshared (const pthread_condattr_t *
+ __restrict __attr,
+ int *__restrict __pshared)
+ __THROW __nonnull ((1, 2));
+
+/* Set the process-shared flag of the condition variable attribute ATTR. */
+extern int pthread_condattr_setpshared (pthread_condattr_t *__attr,
+ int __pshared) __THROW __nonnull ((1));
+
+#ifdef __USE_XOPEN2K
+/* Get the clock selected for the condition variable attribute ATTR. */
+extern int pthread_condattr_getclock (const pthread_condattr_t *
+ __restrict __attr,
+ __clockid_t *__restrict __clock_id)
+ __THROW __nonnull ((1, 2));
+
+/* Set the clock selected for the condition variable attribute ATTR. */
+extern int pthread_condattr_setclock (pthread_condattr_t *__attr,
+ __clockid_t __clock_id)
+ __THROW __nonnull ((1));
+#endif
+
+
+#ifdef __USE_XOPEN2K
+/* Functions to handle spinlocks. */
+
+/* Initialize the spinlock LOCK. If PSHARED is nonzero the spinlock can
+ be shared between different processes. */
+extern int pthread_spin_init (pthread_spinlock_t *__lock, int __pshared)
+ __THROW __nonnull ((1));
+
+/* Destroy the spinlock LOCK. */
+extern int pthread_spin_destroy (pthread_spinlock_t *__lock)
+ __THROW __nonnull ((1));
+
+/* Wait until spinlock LOCK is acquired. */
+extern int pthread_spin_lock (pthread_spinlock_t *__lock)
+ __THROWNL __nonnull ((1));
+
+/* Try to lock spinlock LOCK. */
+extern int pthread_spin_trylock (pthread_spinlock_t *__lock)
+ __THROWNL __nonnull ((1));
+
+/* Release spinlock LOCK. */
+extern int pthread_spin_unlock (pthread_spinlock_t *__lock)
+ __THROWNL __nonnull ((1));
+
+
+/* Functions to handle barriers. */
+
+/* Initialize BARRIER with the attributes in ATTR. The barrier is
+ opened when COUNT waiters have arrived. */
+extern int pthread_barrier_init (pthread_barrier_t *__restrict __barrier,
+ const pthread_barrierattr_t *__restrict
+ __attr, unsigned int __count)
+ __THROW __nonnull ((1));
+
+/* Destroy a previously dynamically initialized barrier BARRIER. */
+extern int pthread_barrier_destroy (pthread_barrier_t *__barrier)
+ __THROW __nonnull ((1));
+
+/* Wait on barrier BARRIER. */
+extern int pthread_barrier_wait (pthread_barrier_t *__barrier)
+ __THROWNL __nonnull ((1));
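
Exactly one of the COUNT waiters receives PTHREAD_BARRIER_SERIAL_THREAD while the others get zero; a small sketch illustrates the pattern (phase_sync is an invented name, and the barrier is assumed to be initialised elsewhere).

#include <pthread.h>

static pthread_barrier_t barrier;  /* assumed initialised with some COUNT */

static void
phase_sync (void)
{
  int rc = pthread_barrier_wait (&barrier);
  if (rc == PTHREAD_BARRIER_SERIAL_THREAD)
    {
      /* Exactly one of the COUNT waiters gets this value and can do
         single-threaded post-phase work here.  */
    }
  /* All other waiters receive 0.  */
}
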
+
+
+/* Initialize barrier attribute ATTR. */
+extern int pthread_barrierattr_init (pthread_barrierattr_t *__attr)
+ __THROW __nonnull ((1));
+
+/* Destroy previously dynamically initialized barrier attribute ATTR. */
+extern int pthread_barrierattr_destroy (pthread_barrierattr_t *__attr)
+ __THROW __nonnull ((1));
+
+/* Get the process-shared flag of the barrier attribute ATTR. */
+extern int pthread_barrierattr_getpshared (const pthread_barrierattr_t *
+ __restrict __attr,
+ int *__restrict __pshared)
+ __THROW __nonnull ((1, 2));
+
+/* Set the process-shared flag of the barrier attribute ATTR. */
+extern int pthread_barrierattr_setpshared (pthread_barrierattr_t *__attr,
+ int __pshared)
+ __THROW __nonnull ((1));
+#endif
+
+
+/* Functions for handling thread-specific data. */
+
+/* Create a key value identifying a location in the thread-specific
+ data area. Each thread maintains a distinct thread-specific data
+ area. DESTR_FUNCTION, if non-NULL, is called with the value
+ associated with that key when the key is destroyed.
+ DESTR_FUNCTION is not called if the associated value is NULL when
+ the key is destroyed. */
+extern int pthread_key_create (pthread_key_t *__key,
+ void (*__destr_function) (void *))
+ __THROW __nonnull ((1));
+
+/* Destroy KEY. */
+extern int pthread_key_delete (pthread_key_t __key) __THROW;
+
+/* Return current value of the thread-specific data slot identified by KEY. */
+extern void *pthread_getspecific (pthread_key_t __key) __THROW;
+
+/* Store POINTER in the thread-specific data slot identified by KEY. */
+extern int pthread_setspecific (pthread_key_t __key,
+ const void *__pointer) __THROW;
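
A hedged sketch of the thread-specific data calls declared above; log_key, free_slot and thread_buffer are invented names, and error checking is kept minimal for brevity.

#include <pthread.h>
#include <stdlib.h>

static pthread_key_t log_key;       /* hypothetical per-thread slot */

static void
free_slot (void *value)             /* destructor: runs at thread exit */
{
  free (value);
}

static void
setup_key (void)
{
  pthread_key_create (&log_key, free_slot);
}

static char *
thread_buffer (void)
{
  char *buf = pthread_getspecific (log_key);
  if (buf == NULL)
    {
      buf = calloc (1, 256);
      pthread_setspecific (log_key, buf);
    }
  return buf;
}
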
+
+
+#ifdef __USE_XOPEN2K
+/* Get ID of CPU-time clock for thread THREAD_ID. */
+extern int pthread_getcpuclockid (pthread_t __thread_id,
+ __clockid_t *__clock_id)
+ __THROW __nonnull ((2));
+#endif
+
+
+/* Install handlers to be called when a new process is created with FORK.
+ The PREPARE handler is called in the parent process just before performing
+ FORK. The PARENT handler is called in the parent process just after FORK.
+ The CHILD handler is called in the child process. Each of the three
+ handlers can be NULL, meaning that no handler needs to be called at that
+ point.
+ PTHREAD_ATFORK can be called several times, in which case the PREPARE
+ handlers are called in LIFO order (last added with PTHREAD_ATFORK,
+ first called before FORK), and the PARENT and CHILD handlers are called
+ in FIFO (first added, first called). */
+
+extern int pthread_atfork (void (*__prepare) (void),
+ void (*__parent) (void),
+ void (*__child) (void)) __THROW;
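
A common use of pthread_atfork is to take process-wide locks in the PREPARE handler and release them in both the PARENT and CHILD handlers, so the child never starts with a lock held by another thread; the sketch below uses invented names (state_lock, install_fork_handlers).

#include <pthread.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void before_fork (void) { pthread_mutex_lock (&state_lock); }
static void after_fork_parent (void) { pthread_mutex_unlock (&state_lock); }
static void after_fork_child (void) { pthread_mutex_unlock (&state_lock); }

static void
install_fork_handlers (void)        /* hypothetical setup routine */
{
  pthread_atfork (before_fork, after_fork_parent, after_fork_child);
}
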
+
+
+#ifdef __USE_EXTERN_INLINES
+/* Optimizations. */
+__extern_inline int
+__NTH (pthread_equal (pthread_t __thread1, pthread_t __thread2))
+{
+ return __thread1 == __thread2;
+}
+#endif
+
+__END_DECLS
+
+#endif /* pthread.h */
--
2.30.0