#include "kmp_config.h"
#ifndef KMP_STATIC_STEAL_ENABLED
#define KMP_STATIC_STEAL_ENABLED 1
#define KMP_WEIGHTED_ITERATIONS_SUPPORTED \
  (KMP_AFFINITY_SUPPORTED && KMP_STATIC_STEAL_ENABLED && \
   (KMP_ARCH_X86 || KMP_ARCH_X86_64))
#define TASK_CURRENT_NOT_QUEUED 0
#define TASK_CURRENT_QUEUED 1
#define TASK_NOT_PUSHED 1
#define TASK_SUCCESSFULLY_PUSHED 0
#define TASK_EXPLICIT 1
#define TASK_IMPLICIT 0
#define TASK_DETACHABLE 1
#define TASK_UNDETACHABLE 0
#define KMP_CANCEL_THREADS
#define KMP_THREAD_ATTR
#if defined(__ANDROID__)
#undef KMP_CANCEL_THREADS
#undef KMP_CANCEL_THREADS
#include "kmp_safe_c_api.h"
#undef KMP_USE_HIER_SCHED
#define KMP_USE_HIER_SCHED KMP_AFFINITY_SUPPORTED
#if KMP_USE_HWLOC && KMP_AFFINITY_SUPPORTED && !defined(OMPD_SKIP_HWLOC)
#define KMP_HWLOC_ENABLED 1
#ifndef HWLOC_OBJ_NUMANODE
#define HWLOC_OBJ_NUMANODE HWLOC_OBJ_NODE
#ifndef HWLOC_OBJ_PACKAGE
#define HWLOC_OBJ_PACKAGE HWLOC_OBJ_SOCKET
#define KMP_HWLOC_ENABLED 0
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#include <xmmintrin.h>
#define KMP_INTERNAL_MALLOC(sz) malloc(sz)
#define KMP_INTERNAL_FREE(p) free(p)
#define KMP_INTERNAL_REALLOC(p, sz) realloc((p), (sz))
#define KMP_INTERNAL_CALLOC(n, sz) calloc((n), (sz))
#include "kmp_debug.h"
#include "kmp_version.h"
#include "kmp_barrier.h"
#include "kmp_debugger.h"
#define KMP_HANDLE_SIGNALS ((KMP_OS_UNIX && !KMP_OS_WASI) || KMP_OS_WINDOWS)
#include "kmp_wrapper_malloc.h"
#if !defined NSIG && defined _NSIG
#pragma weak clock_gettime
#include "ompt-internal.h"
#include "ompd-specific.h"
#define UNLIKELY(x) (x)
#ifndef USE_FAST_MEMORY
#define USE_FAST_MEMORY 3
#ifndef USE_CMP_XCHG_FOR_BGET
#define USE_CMP_XCHG_FOR_BGET 1
#define KMP_NSEC_PER_SEC 1000000000L
#define KMP_USEC_PER_SEC 1000000L
#define KMP_NSEC_PER_USEC 1000L
  KMP_IDENT_BARRIER_IMPL_MASK = 0x01C0,
  KMP_IDENT_BARRIER_IMPL_FOR = 0x0040,
  KMP_IDENT_BARRIER_IMPL_SECTIONS = 0x00C0,
  KMP_IDENT_BARRIER_IMPL_SINGLE = 0x0140,
  KMP_IDENT_BARRIER_IMPL_WORKSHARE = 0x01C0,
  KMP_IDENT_ATOMIC_HINT_UNCONTENDED = 0x010000,
  KMP_IDENT_ATOMIC_HINT_CONTENDED = 0x020000,
  KMP_IDENT_ATOMIC_HINT_NONSPECULATIVE = 0x040000,
  KMP_IDENT_ATOMIC_HINT_SPECULATIVE = 0x080000,
  KMP_IDENT_OPENMP_SPEC_VERSION_MASK = 0xFF000000
  kmp_int32 get_openmp_version() {
    return (((flags & KMP_IDENT_OPENMP_SPEC_VERSION_MASK) >> 24) & 0xFF);
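  // Illustrative note (added; not in the original header): the spec version
  // lives in the top byte of `flags`, so e.g. flags == 0x32000000 decodes as
  // ((0x32000000 & 0xFF000000) >> 24) & 0xFF == 0x32 (decimal 50).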
typedef union kmp_team kmp_team_t;
typedef struct kmp_taskdata kmp_taskdata_t;
typedef union kmp_task_team kmp_task_team_t;
typedef union kmp_team kmp_team_p;
typedef union kmp_info kmp_info_p;
typedef union kmp_root kmp_root_p;
template <bool C = false, bool S = true> class kmp_flag_32;
template <bool C = false, bool S = true> class kmp_flag_64;
template <bool C = false, bool S = true> class kmp_atomic_flag_64;
class kmp_flag_oncore;
#define KMP_PACK_64(HIGH_32, LOW_32) \
  ((kmp_int64)((((kmp_uint64)(HIGH_32)) << 32) | (kmp_uint64)(LOW_32)))
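// Illustrative example (added; not in the original header): KMP_PACK_64 puts
// HIGH_32 in the upper half and LOW_32 in the lower half of a 64-bit value,
// e.g. KMP_PACK_64(0x1, 0x2) == 0x0000000100000002.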
  while (*(_x) == ' ' || *(_x) == '\t') \
#define SKIP_DIGITS(_x) \
  while (*(_x) >= '0' && *(_x) <= '9') \
#define SKIP_TOKEN(_x) \
  while ((*(_x) >= '0' && *(_x) <= '9') || (*(_x) >= 'a' && *(_x) <= 'z') || \
         (*(_x) >= 'A' && *(_x) <= 'Z') || *(_x) == '_') \
#define SKIP_TO(_x, _c) \
  while (*(_x) != '\0' && *(_x) != (_c)) \
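// Illustrative sketch (added; assumes the elided macro bodies advance the
// cursor, e.g. `(_x)++`): these form a tiny in-place scanner for parsing
// settings strings such as affinity lists:
//   char *p = buf;
//   SKIP_WS(p);      // past leading blanks
//   SKIP_TOKEN(p);   // past an identifier-like token
//   SKIP_TO(p, ','); // stop at the next comma or at the terminating '\0'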
#define KMP_MAX(x, y) ((x) > (y) ? (x) : (y))
#define KMP_MIN(x, y) ((x) < (y) ? (x) : (y))
enum kmp_state_timer {
#ifdef USE_LOAD_BALANCE
  dynamic_load_balance,
  dynamic_thread_limit,
#ifndef KMP_SCHED_TYPE_DEFINED
#define KMP_SCHED_TYPE_DEFINED
typedef enum kmp_sched {
  kmp_sched_static = 1,
  kmp_sched_dynamic = 2,
  kmp_sched_guided = 3,
  kmp_sched_upper_std = 5,
  kmp_sched_lower_ext = 100,
  kmp_sched_trapezoidal = 101,
#if KMP_STATIC_STEAL_ENABLED
  kmp_sched_static_steal = 102,
  kmp_sched_default = kmp_sched_static,
  kmp_sched_monotonic = 0x80000000
  kmp_sch_static_chunked = 33,
  kmp_sch_dynamic_chunked = 35,
  kmp_sch_runtime = 37,
  kmp_sch_trapezoidal = 39,
  kmp_sch_static_greedy = 40,
  kmp_sch_static_balanced = 41,
  kmp_sch_guided_iterative_chunked = 42,
  kmp_sch_guided_analytical_chunked = 43,
  kmp_sch_static_steal = 44,
  kmp_sch_static_balanced_chunked = 45,
  kmp_ord_static_chunked = 65,
  kmp_ord_dynamic_chunked = 67,
  kmp_ord_guided_chunked = 68,
  kmp_ord_runtime = 69,
  kmp_ord_trapezoidal = 71,
  kmp_nm_static_chunked =
  kmp_nm_dynamic_chunked = 163,
  kmp_nm_runtime = 165,
  kmp_nm_trapezoidal = 167,
  kmp_nm_static_greedy = 168,
  kmp_nm_static_balanced = 169,
  kmp_nm_guided_iterative_chunked = 170,
  kmp_nm_guided_analytical_chunked = 171,
  kmp_nm_static_steal =
  kmp_nm_ord_static_chunked = 193,
  kmp_nm_ord_dynamic_chunked = 195,
  kmp_nm_ord_guided_chunked = 196,
  kmp_nm_ord_runtime = 197,
  kmp_nm_ord_trapezoidal = 199,
#define SCHEDULE_WITHOUT_MODIFIERS(s) \
#define SCHEDULE_HAS_MONOTONIC(s) (((s)&kmp_sch_modifier_monotonic) != 0)
#define SCHEDULE_HAS_NONMONOTONIC(s) (((s)&kmp_sch_modifier_nonmonotonic) != 0)
#define SCHEDULE_HAS_NO_MODIFIERS(s) \
  (((s) & (kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic)) == 0)
#define SCHEDULE_GET_MODIFIERS(s) \
  ((enum sched_type)( \
      (s) & (kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic)))
#define SCHEDULE_SET_MODIFIERS(s, m) \
  (s = (enum sched_type)((kmp_int32)s | (kmp_int32)m))
#define SCHEDULE_NONMONOTONIC 0
#define SCHEDULE_MONOTONIC 1
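// Illustrative example (added; relies on the monotonic/nonmonotonic modifier
// bits of enum sched_type, which are elided from this excerpt): for
// s = (enum sched_type)(kmp_sch_dynamic_chunked | kmp_sch_modifier_monotonic),
// SCHEDULE_HAS_MONOTONIC(s) is nonzero, SCHEDULE_GET_MODIFIERS(s) returns the
// modifier bits alone, and SCHEDULE_WITHOUT_MODIFIERS(s) yields
// kmp_sch_dynamic_chunked again.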
static inline void
__kmp_sched_apply_mods_stdkind(kmp_sched_t *kind,
                               enum sched_type internal_kind) {
  if (SCHEDULE_HAS_MONOTONIC(internal_kind)) {
    *kind = (kmp_sched_t)((int)*kind | (int)kmp_sched_monotonic);
static inline void
__kmp_sched_apply_mods_intkind(kmp_sched_t kind,
                               enum sched_type *internal_kind) {
  if ((int)kind & (int)kmp_sched_monotonic) {
    *internal_kind = (enum sched_type)((int)*internal_kind |
static inline kmp_sched_t __kmp_sched_without_mods(kmp_sched_t kind) {
  return (kmp_sched_t)((int)kind & ~((int)kmp_sched_monotonic));
typedef union kmp_r_sched {
enum mic_type { non_mic, mic1, mic2, mic3, dummy };
typedef struct kmp_nested_nthreads_t {
} kmp_nested_nthreads_t;
extern kmp_nested_nthreads_t __kmp_nested_nth;
#undef KMP_FAST_REDUCTION_BARRIER
#define KMP_FAST_REDUCTION_BARRIER 1
#undef KMP_FAST_REDUCTION_CORE_DUO
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#define KMP_FAST_REDUCTION_CORE_DUO 1
enum _reduction_method {
  reduction_method_not_defined = 0,
  critical_reduce_block = (1 << 8),
  atomic_reduce_block = (2 << 8),
  tree_reduce_block = (3 << 8),
  empty_reduce_block = (4 << 8)
#if KMP_FAST_REDUCTION_BARRIER
#define PACK_REDUCTION_METHOD_AND_BARRIER(reduction_method, barrier_type) \
  ((reduction_method) | (barrier_type))
#define UNPACK_REDUCTION_METHOD(packed_reduction_method) \
  ((enum _reduction_method)((packed_reduction_method) & (0x0000FF00)))
#define UNPACK_REDUCTION_BARRIER(packed_reduction_method) \
  ((enum barrier_type)((packed_reduction_method) & (0x000000FF)))
#define PACK_REDUCTION_METHOD_AND_BARRIER(reduction_method, barrier_type) \
#define UNPACK_REDUCTION_METHOD(packed_reduction_method) \
  (packed_reduction_method)
#define UNPACK_REDUCTION_BARRIER(packed_reduction_method) (bs_plain_barrier)
#define TEST_REDUCTION_METHOD(packed_reduction_method, which_reduction_block) \
  ((UNPACK_REDUCTION_METHOD(packed_reduction_method)) == \
   (which_reduction_block))
#if KMP_FAST_REDUCTION_BARRIER
#define TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER \
  (PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_reduction_barrier))
#define TREE_REDUCE_BLOCK_WITH_PLAIN_BARRIER \
  (PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_plain_barrier))
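// Illustrative worked example (added; assumes bs_reduction_barrier == 2 as in
// the barrier_type enumeration later in this header): with
// tree_reduce_block == (3 << 8) == 0x300, the packed value is
// 0x300 | 2 == 0x302; UNPACK_REDUCTION_METHOD(0x302) == 0x300 recovers the
// method and UNPACK_REDUCTION_BARRIER(0x302) == 2 recovers the barrier type.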
typedef int PACKED_REDUCTION_METHOD_T;
#pragma warning(disable : 271 310)
typedef enum kmp_hw_core_type_t {
  KMP_HW_CORE_TYPE_UNKNOWN = 0x0,
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  KMP_HW_CORE_TYPE_ATOM = 0x20,
  KMP_HW_CORE_TYPE_CORE = 0x40,
  KMP_HW_MAX_NUM_CORE_TYPES = 3,
  KMP_HW_MAX_NUM_CORE_TYPES = 1,
#define KMP_HW_MAX_NUM_CORE_EFFS 8
#define KMP_DEBUG_ASSERT_VALID_HW_TYPE(type) \
  KMP_DEBUG_ASSERT(type >= (kmp_hw_t)0 && type < KMP_HW_LAST)
#define KMP_ASSERT_VALID_HW_TYPE(type) \
  KMP_ASSERT(type >= (kmp_hw_t)0 && type < KMP_HW_LAST)
#define KMP_FOREACH_HW_TYPE(type) \
  for (kmp_hw_t type = (kmp_hw_t)0; type < KMP_HW_LAST; \
       type = (kmp_hw_t)((int)type + 1))
const char *__kmp_hw_get_keyword(kmp_hw_t type, bool plural = false);
const char *__kmp_hw_get_catalog_string(kmp_hw_t type, bool plural = false);
const char *__kmp_hw_get_core_type_string(kmp_hw_core_type_t type);
#if KMP_AFFINITY_SUPPORTED
#if _MSC_VER < 1600 && KMP_MSVC_COMPAT
typedef struct GROUP_AFFINITY {
#if KMP_GROUP_AFFINITY
extern int __kmp_num_proc_groups;
static const int __kmp_num_proc_groups = 1;
typedef DWORD (*kmp_GetActiveProcessorCount_t)(WORD);
extern kmp_GetActiveProcessorCount_t __kmp_GetActiveProcessorCount;
typedef WORD (*kmp_GetActiveProcessorGroupCount_t)(void);
extern kmp_GetActiveProcessorGroupCount_t __kmp_GetActiveProcessorGroupCount;
typedef BOOL (*kmp_GetThreadGroupAffinity_t)(HANDLE, GROUP_AFFINITY *);
extern kmp_GetThreadGroupAffinity_t __kmp_GetThreadGroupAffinity;
typedef BOOL (*kmp_SetThreadGroupAffinity_t)(HANDLE, const GROUP_AFFINITY *,
extern kmp_SetThreadGroupAffinity_t __kmp_SetThreadGroupAffinity;
extern hwloc_topology_t __kmp_hwloc_topology;
extern int __kmp_hwloc_error;
extern size_t __kmp_affin_mask_size;
#define KMP_AFFINITY_CAPABLE() (__kmp_affin_mask_size > 0)
#define KMP_AFFINITY_DISABLE() (__kmp_affin_mask_size = 0)
#define KMP_AFFINITY_ENABLE(mask_size) (__kmp_affin_mask_size = mask_size)
#define KMP_CPU_SET_ITERATE(i, mask) \
  for (i = (mask)->begin(); (int)i != (mask)->end(); i = (mask)->next(i))
#define KMP_CPU_SET(i, mask) (mask)->set(i)
#define KMP_CPU_ISSET(i, mask) (mask)->is_set(i)
#define KMP_CPU_CLR(i, mask) (mask)->clear(i)
#define KMP_CPU_ZERO(mask) (mask)->zero()
#define KMP_CPU_ISEMPTY(mask) (mask)->empty()
#define KMP_CPU_COPY(dest, src) (dest)->copy(src)
#define KMP_CPU_AND(dest, src) (dest)->bitwise_and(src)
#define KMP_CPU_COMPLEMENT(max_bit_number, mask) (mask)->bitwise_not()
#define KMP_CPU_UNION(dest, src) (dest)->bitwise_or(src)
#define KMP_CPU_EQUAL(dest, src) (dest)->is_equal(src)
#define KMP_CPU_ALLOC(ptr) (ptr = __kmp_affinity_dispatch->allocate_mask())
#define KMP_CPU_FREE(ptr) __kmp_affinity_dispatch->deallocate_mask(ptr)
#define KMP_CPU_ALLOC_ON_STACK(ptr) KMP_CPU_ALLOC(ptr)
#define KMP_CPU_FREE_FROM_STACK(ptr) KMP_CPU_FREE(ptr)
#define KMP_CPU_INTERNAL_ALLOC(ptr) KMP_CPU_ALLOC(ptr)
#define KMP_CPU_INTERNAL_FREE(ptr) KMP_CPU_FREE(ptr)
#define KMP_CPU_INDEX(arr, i) __kmp_affinity_dispatch->index_mask_array(arr, i)
#define KMP_CPU_ALLOC_ARRAY(arr, n) \
  (arr = __kmp_affinity_dispatch->allocate_mask_array(n))
#define KMP_CPU_FREE_ARRAY(arr, n) \
  __kmp_affinity_dispatch->deallocate_mask_array(arr)
#define KMP_CPU_INTERNAL_ALLOC_ARRAY(arr, n) KMP_CPU_ALLOC_ARRAY(arr, n)
#define KMP_CPU_INTERNAL_FREE_ARRAY(arr, n) KMP_CPU_FREE_ARRAY(arr, n)
#define __kmp_get_system_affinity(mask, abort_bool) \
  (mask)->get_system_affinity(abort_bool)
#define __kmp_set_system_affinity(mask, abort_bool) \
  (mask)->set_system_affinity(abort_bool)
#define __kmp_get_proc_group(mask) (mask)->get_proc_group()
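// Illustrative usage sketch (added; not in the original header): allocate a
// mask, read the current system affinity into it, and visit each OS proc.
//   kmp_affin_mask_t *mask;
//   KMP_CPU_ALLOC(mask);
//   __kmp_get_system_affinity(mask, TRUE);
//   int cpu;
//   KMP_CPU_SET_ITERATE(cpu, mask) {
//     // cpu ranges over positions reported by the mask's begin()/next()
//   }
//   KMP_CPU_FREE(mask);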
    void *operator new(size_t n);
    void operator delete(void *p);
    void *operator new[](size_t n);
    void operator delete[](void *p);
    virtual void set(int i) {}
    virtual bool is_set(int i) const { return false; }
    virtual void clear(int i) {}
    virtual void zero() {}
    virtual bool empty() const { return true; }
    virtual void copy(const Mask *src) {}
    virtual void bitwise_and(const Mask *rhs) {}
    virtual void bitwise_or(const Mask *rhs) {}
    virtual void bitwise_not() {}
    virtual bool is_equal(const Mask *rhs) const { return false; }
    virtual int begin() const { return 0; }
    virtual int end() const { return 0; }
    virtual int next(int previous) const { return 0; }
    virtual int set_process_affinity(bool abort_on_error) const { return -1; }
    virtual int set_system_affinity(bool abort_on_error) const { return -1; }
    virtual int get_system_affinity(bool abort_on_error) { return -1; }
    virtual int get_proc_group() const { return -1; }
    int get_max_cpu() const {
      KMP_CPU_SET_ITERATE(cpu, this) {
  void *operator new(size_t n);
  void operator delete(void *p);
  virtual ~KMPAffinity() = default;
  virtual void determine_capable(const char *env_var) {}
  virtual void bind_thread(int proc) {}
  virtual Mask *allocate_mask() { return nullptr; }
  virtual void deallocate_mask(Mask *m) {}
  virtual Mask *allocate_mask_array(int num) { return nullptr; }
  virtual void deallocate_mask_array(Mask *m) {}
  virtual Mask *index_mask_array(Mask *m, int index) { return nullptr; }
  static void pick_api();
  static void destroy_api();
  virtual api_type get_api_type() const {
  static bool picked_api;
typedef KMPAffinity::Mask kmp_affin_mask_t;
extern KMPAffinity *__kmp_affinity_dispatch;
class kmp_affinity_raii_t {
  kmp_affin_mask_t *mask;
  kmp_affinity_raii_t(const kmp_affin_mask_t *new_mask = nullptr)
      : mask(nullptr), restored(false) {
    if (KMP_AFFINITY_CAPABLE()) {
      KMP_ASSERT(mask != NULL);
      __kmp_get_system_affinity(mask, true);
        __kmp_set_system_affinity(new_mask, true);
    if (mask && KMP_AFFINITY_CAPABLE() && !restored) {
      __kmp_set_system_affinity(mask, true);
  ~kmp_affinity_raii_t() { restore(); }
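  // Illustrative usage (added; not part of the original class): save the
  // current affinity, optionally re-bind to `tmp_mask`, and rely on the
  // destructor to restore the saved mask at scope exit.
  //   { kmp_affinity_raii_t guard(tmp_mask); /* pinned work here */ }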
#define KMP_AFFIN_MASK_PRINT_LEN 1024
enum affinity_top_method {
  affinity_top_method_all = 0,
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  affinity_top_method_apicid,
  affinity_top_method_x2apicid,
  affinity_top_method_x2apicid_1f,
  affinity_top_method_cpuinfo,
#if KMP_GROUP_AFFINITY
  affinity_top_method_group,
  affinity_top_method_flat,
  affinity_top_method_hwloc,
  affinity_top_method_default
#define affinity_respect_mask_default (2)
typedef struct kmp_affinity_flags_t {
  unsigned verbose : 1;
  unsigned warnings : 1;
  unsigned respect : 2;
  unsigned initialized : 1;
  unsigned core_types_gran : 1;
  unsigned core_effs_gran : 1;
  unsigned omp_places : 1;
  unsigned reserved : 22;
} kmp_affinity_flags_t;
KMP_BUILD_ASSERT(sizeof(kmp_affinity_flags_t) == 4);
typedef struct kmp_affinity_ids_t {
  int ids[KMP_HW_LAST];
typedef struct kmp_affinity_attrs_t {
  unsigned reserved : 15;
} kmp_affinity_attrs_t;
#define KMP_AFFINITY_ATTRS_UNKNOWN \
  { KMP_HW_CORE_TYPE_UNKNOWN, kmp_hw_attr_t::UNKNOWN_CORE_EFF, 0, 0 }
typedef struct kmp_affinity_t {
  enum affinity_type type;
  kmp_affinity_attrs_t core_attr_gran;
  kmp_affinity_flags_t flags;
  kmp_affin_mask_t *masks;
  kmp_affinity_ids_t *ids;
  kmp_affinity_attrs_t *attrs;
  unsigned num_os_id_masks;
  kmp_affin_mask_t *os_id_masks;
#define KMP_AFFINITY_INIT(env) \
  nullptr, affinity_default, KMP_HW_UNKNOWN, -1, KMP_AFFINITY_ATTRS_UNKNOWN, \
      {TRUE, FALSE, TRUE, affinity_respect_mask_default, FALSE, FALSE, \
       FALSE, FALSE, FALSE}, \
      0, nullptr, nullptr, nullptr, 0, nullptr, env \
extern enum affinity_top_method __kmp_affinity_top_method;
extern kmp_affinity_t __kmp_affinity;
extern kmp_affinity_t __kmp_hh_affinity;
extern kmp_affinity_t *__kmp_affinities[2];
extern void __kmp_affinity_bind_thread(int which);
extern kmp_affin_mask_t *__kmp_affin_fullMask;
extern kmp_affin_mask_t *__kmp_affin_origMask;
extern char *__kmp_cpuinfo_file;
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
extern int __kmp_first_osid_with_ecore;
typedef enum kmp_proc_bind_t {
typedef struct kmp_nested_proc_bind_t {
  kmp_proc_bind_t *bind_types;
} kmp_nested_proc_bind_t;
extern kmp_nested_proc_bind_t __kmp_nested_proc_bind;
extern kmp_proc_bind_t __kmp_teams_proc_bind;
extern int __kmp_display_affinity;
extern char *__kmp_affinity_format;
static const size_t KMP_AFFINITY_FORMAT_SIZE = 512;
extern int __kmp_tool;
extern char *__kmp_tool_libraries;
#if KMP_AFFINITY_SUPPORTED
#define KMP_PLACE_ALL (-1)
#define KMP_PLACE_UNDEFINED (-2)
#define KMP_AFFINITY_NON_PROC_BIND \
  ((__kmp_nested_proc_bind.bind_types[0] == proc_bind_false || \
    __kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) && \
   (__kmp_affinity.num_masks > 0 || __kmp_affinity.type == affinity_balanced))
extern int __kmp_affinity_num_places;
typedef enum kmp_cancel_kind_t {
typedef struct kmp_hws_item {
extern kmp_hws_item_t __kmp_hws_socket;
extern kmp_hws_item_t __kmp_hws_die;
extern kmp_hws_item_t __kmp_hws_node;
extern kmp_hws_item_t __kmp_hws_tile;
extern kmp_hws_item_t __kmp_hws_core;
extern kmp_hws_item_t __kmp_hws_proc;
extern int __kmp_hws_requested;
extern int __kmp_hws_abs_flag;
#define KMP_PAD(type, sz) \
  (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))
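// Illustrative worked example (added; not in the original header): KMP_PAD
// rounds sizeof(type) up to a multiple of sz. With sizeof(type) == 40 and
// sz == 64: 40 + (64 - (39 % 64) - 1) == 64. It is used below to pad
// structures to cache-line boundaries, e.g. KMP_PAD(kmp_bstate_t, CACHE_LINE).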
#define KMP_GTID_DNE (-2)
#define KMP_GTID_SHUTDOWN (-3)
#define KMP_GTID_MONITOR (-4)
#define KMP_GTID_UNKNOWN (-5)
#define KMP_GTID_MIN (-6)
typedef uintptr_t omp_uintptr_t;
  omp_atk_sync_hint = 1,
  omp_atk_alignment = 2,
  omp_atk_pool_size = 4,
  omp_atk_fallback = 5,
  omp_atk_fb_data = 6,
  omp_atk_partition = 8,
  omp_atk_pin_device = 9,
  omp_atk_preferred_device = 10,
  omp_atk_device_access = 11,
  omp_atk_target_access = 12,
  omp_atk_atomic_scope = 13,
  omp_atk_part_size = 14
} omp_alloctrait_key_t;
  omp_atv_contended = 3,
  omp_atv_uncontended = 4,
  omp_atv_serialized = 5,
  omp_atv_sequential = omp_atv_serialized,
  omp_atv_private = 6,
  omp_atv_cgroup = 10,
  omp_atv_default_mem_fb = 11,
  omp_atv_null_fb = 12,
  omp_atv_abort_fb = 13,
  omp_atv_allocator_fb = 14,
  omp_atv_environment = 15,
  omp_atv_nearest = 16,
  omp_atv_blocked = 17,
  omp_atv_interleaved = 18,
  omp_atv_single = 20,
  omp_atv_multiple = 21,
  omp_atv_memspace = 22
} omp_alloctrait_value_t;
#define omp_atv_default ((omp_uintptr_t)-1)
typedef void *omp_memspace_handle_t;
extern omp_memspace_handle_t const omp_null_mem_space;
extern omp_memspace_handle_t const omp_default_mem_space;
extern omp_memspace_handle_t const omp_large_cap_mem_space;
extern omp_memspace_handle_t const omp_const_mem_space;
extern omp_memspace_handle_t const omp_high_bw_mem_space;
extern omp_memspace_handle_t const omp_low_lat_mem_space;
extern omp_memspace_handle_t const llvm_omp_target_host_mem_space;
extern omp_memspace_handle_t const llvm_omp_target_shared_mem_space;
extern omp_memspace_handle_t const llvm_omp_target_device_mem_space;
extern omp_memspace_handle_t const kmp_max_mem_space;
  omp_alloctrait_key_t key;
  omp_uintptr_t value;
typedef void *omp_allocator_handle_t;
extern omp_allocator_handle_t const omp_null_allocator;
extern omp_allocator_handle_t const omp_default_mem_alloc;
extern omp_allocator_handle_t const omp_large_cap_mem_alloc;
extern omp_allocator_handle_t const omp_const_mem_alloc;
extern omp_allocator_handle_t const omp_high_bw_mem_alloc;
extern omp_allocator_handle_t const omp_low_lat_mem_alloc;
extern omp_allocator_handle_t const omp_cgroup_mem_alloc;
extern omp_allocator_handle_t const omp_pteam_mem_alloc;
extern omp_allocator_handle_t const omp_thread_mem_alloc;
extern omp_allocator_handle_t const llvm_omp_target_host_mem_alloc;
extern omp_allocator_handle_t const llvm_omp_target_shared_mem_alloc;
extern omp_allocator_handle_t const llvm_omp_target_device_mem_alloc;
extern omp_allocator_handle_t const kmp_max_mem_alloc;
extern omp_allocator_handle_t __kmp_def_allocator;
extern int __kmp_memkind_available;
extern bool __kmp_hwloc_available;
  omp_memspace_handle_t memspace;
  int num_resources = 0;
  int *resources = nullptr;
  omp_memspace_handle_t memspace;
  omp_alloctrait_value_t fb;
  kmp_uint64 pool_size;
  kmp_uint64 pool_used;
  omp_alloctrait_value_t partition;
  int preferred_device;
  omp_alloctrait_value_t target_access;
  omp_alloctrait_value_t atomic_scope;
#if KMP_HWLOC_ENABLED
  omp_alloctrait_value_t membind;
extern omp_allocator_handle_t __kmpc_init_allocator(int gtid,
                                                    omp_memspace_handle_t,
                                                    omp_alloctrait_t traits[]);
extern void __kmpc_destroy_allocator(int gtid, omp_allocator_handle_t al);
extern void __kmpc_set_default_allocator(int gtid, omp_allocator_handle_t al);
extern omp_allocator_handle_t __kmpc_get_default_allocator(int gtid);
extern void *__kmpc_alloc(int gtid, size_t sz, omp_allocator_handle_t al);
extern void *__kmpc_aligned_alloc(int gtid, size_t align, size_t sz,
                                  omp_allocator_handle_t al);
extern void *__kmpc_calloc(int gtid, size_t nmemb, size_t sz,
                           omp_allocator_handle_t al);
extern void *__kmpc_realloc(int gtid, void *ptr, size_t sz,
                            omp_allocator_handle_t al,
                            omp_allocator_handle_t free_al);
extern void __kmpc_free(int gtid, void *ptr, omp_allocator_handle_t al);
extern void *__kmp_alloc(int gtid, size_t align, size_t sz,
                         omp_allocator_handle_t al);
extern void *__kmp_calloc(int gtid, size_t align, size_t nmemb, size_t sz,
                          omp_allocator_handle_t al);
extern void *__kmp_realloc(int gtid, void *ptr, size_t sz,
                           omp_allocator_handle_t al,
                           omp_allocator_handle_t free_al);
extern void ___kmpc_free(int gtid, void *ptr, omp_allocator_handle_t al);
extern void __kmp_init_memkind();
extern void __kmp_fini_memkind();
extern void __kmp_init_target_mem();
extern void __kmp_fini_target_mem();
extern omp_memspace_handle_t __kmp_get_devices_memspace(int ndevs,
                                                        omp_memspace_handle_t,
extern omp_allocator_handle_t __kmp_get_devices_allocator(int ndevs,
                                                          omp_memspace_handle_t,
extern int __kmp_get_memspace_num_resources(omp_memspace_handle_t memspace);
extern omp_memspace_handle_t
__kmp_get_submemspace(omp_memspace_handle_t memspace, int num_resources,
#if ENABLE_LIBOMPTARGET
extern void __kmp_init_target_task();
#define KMP_UINT64_MAX \
  (~((kmp_uint64)1 << ((sizeof(kmp_uint64) * (1 << 3)) - 1)))
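// Note (added; not in the original header): sizeof(kmp_uint64) * (1 << 3) - 1
// is 63, so the macro clears only the top bit and KMP_UINT64_MAX evaluates to
// ~(1ULL << 63) == 0x7FFFFFFFFFFFFFFF.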
#define KMP_MIN_NTH 1
#if defined(PTHREAD_THREADS_MAX) && PTHREAD_THREADS_MAX < INT_MAX
#define KMP_MAX_NTH PTHREAD_THREADS_MAX
#define KMP_MAX_NTH 64
#define KMP_MAX_NTH INT_MAX
#ifdef PTHREAD_STACK_MIN
#define KMP_MIN_STKSIZE ((size_t)PTHREAD_STACK_MIN)
#define KMP_MIN_STKSIZE ((size_t)(32 * 1024))
#if KMP_OS_AIX && KMP_ARCH_PPC
#define KMP_MAX_STKSIZE 0x10000000
#define KMP_MAX_STKSIZE (~((size_t)1 << ((sizeof(size_t) * (1 << 3)) - 1)))
#define KMP_DEFAULT_STKSIZE ((size_t)(2 * 1024 * 1024))
#elif KMP_ARCH_X86_64
#define KMP_DEFAULT_STKSIZE ((size_t)(4 * 1024 * 1024))
#define KMP_BACKUP_STKSIZE ((size_t)(2 * 1024 * 1024))
#define KMP_DEFAULT_STKSIZE ((size_t)(4 * 1024 * 1024))
#define KMP_DEFAULT_STKSIZE ((size_t)(4 * 1024 * 1024))
#define KMP_DEFAULT_STKSIZE ((size_t)(1024 * 1024))
#define KMP_DEFAULT_MALLOC_POOL_INCR ((size_t)(1024 * 1024))
#define KMP_MIN_MALLOC_POOL_INCR ((size_t)(4 * 1024))
#define KMP_MAX_MALLOC_POOL_INCR \
  (~((size_t)1 << ((sizeof(size_t) * (1 << 3)) - 1)))
#define KMP_MIN_STKOFFSET (0)
#define KMP_MAX_STKOFFSET KMP_MAX_STKSIZE
#define KMP_DEFAULT_STKOFFSET KMP_MIN_STKOFFSET
#define KMP_DEFAULT_STKOFFSET CACHE_LINE
#define KMP_MIN_STKPADDING (0)
#define KMP_MAX_STKPADDING (2 * 1024 * 1024)
#define KMP_BLOCKTIME_MULTIPLIER \
#define KMP_MIN_BLOCKTIME (0)
#define KMP_MAX_BLOCKTIME \
#define KMP_DEFAULT_BLOCKTIME (__kmp_is_hybrid_cpu() ? (0) : (200000))
#define KMP_DEFAULT_MONITOR_STKSIZE ((size_t)(64 * 1024))
#define KMP_MIN_MONITOR_WAKEUPS (1)
#define KMP_MAX_MONITOR_WAKEUPS (1000)
#define KMP_WAKEUPS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
  (((blocktime) == KMP_MAX_BLOCKTIME) ? (monitor_wakeups) \
   : ((blocktime) == KMP_MIN_BLOCKTIME) ? KMP_MAX_MONITOR_WAKEUPS \
   : ((monitor_wakeups) > (KMP_BLOCKTIME_MULTIPLIER / (blocktime))) \
       ? (monitor_wakeups) \
       : (KMP_BLOCKTIME_MULTIPLIER) / (blocktime))
#define KMP_INTERVALS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
  (((blocktime) + (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)) - 1) / \
   (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)))
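// Illustrative reading (added; holds for blocktime strictly between the
// MIN/MAX sentinels): KMP_WAKEUPS_FROM_BLOCKTIME is effectively
// max(monitor_wakeups, KMP_BLOCKTIME_MULTIPLIER / blocktime), guaranteeing at
// least one monitor wakeup per blocktime window, while
// KMP_INTERVALS_FROM_BLOCKTIME is the ceiling of
// blocktime / (KMP_BLOCKTIME_MULTIPLIER / monitor_wakeups).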
#define KMP_BLOCKTIME(team, tid) \
  (get__bt_set(team, tid) ? get__blocktime(team, tid) : __kmp_dflt_blocktime)
#if KMP_OS_UNIX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
extern kmp_uint64 __kmp_ticks_per_msec;
extern kmp_uint64 __kmp_ticks_per_usec;
#if KMP_COMPILER_ICC || KMP_COMPILER_ICX
#define KMP_NOW() ((kmp_uint64)_rdtsc())
#define KMP_NOW() __kmp_hardware_timestamp()
#define KMP_BLOCKTIME_INTERVAL(team, tid) \
  ((kmp_uint64)KMP_BLOCKTIME(team, tid) * __kmp_ticks_per_usec)
#define KMP_BLOCKING(goal, count) ((goal) > KMP_NOW())
extern kmp_uint64 __kmp_now_nsec();
#define KMP_NOW() __kmp_now_nsec()
#define KMP_BLOCKTIME_INTERVAL(team, tid) \
  ((kmp_uint64)KMP_BLOCKTIME(team, tid) * (kmp_uint64)KMP_NSEC_PER_USEC)
#define KMP_BLOCKING(goal, count) ((count) % 1000 != 0 || (goal) > KMP_NOW())
#define KMP_MIN_STATSCOLS 40
#define KMP_MAX_STATSCOLS 4096
#define KMP_DEFAULT_STATSCOLS 80
#define KMP_MIN_INTERVAL 0
#define KMP_MAX_INTERVAL (INT_MAX - 1)
#define KMP_DEFAULT_INTERVAL 0
#define KMP_MIN_CHUNK 1
#define KMP_MAX_CHUNK (INT_MAX - 1)
#define KMP_DEFAULT_CHUNK 1
#define KMP_MIN_DISP_NUM_BUFF 1
#define KMP_DFLT_DISP_NUM_BUFF 7
#define KMP_MAX_DISP_NUM_BUFF 4096
#define KMP_MAX_ORDERED 8
#define KMP_MAX_FIELDS 32
#define KMP_MAX_BRANCH_BITS 31
#define KMP_MAX_ACTIVE_LEVELS_LIMIT INT_MAX
#define KMP_MAX_DEFAULT_DEVICE_LIMIT INT_MAX
#define KMP_MAX_TASK_PRIORITY_LIMIT INT_MAX
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#define KMP_TLS_GTID_MIN 5
#define KMP_TLS_GTID_MIN INT_MAX
#define KMP_MASTER_TID(tid) (0 == (tid))
#define KMP_WORKER_TID(tid) (0 != (tid))
#define KMP_MASTER_GTID(gtid) (0 == __kmp_tid_from_gtid((gtid)))
#define KMP_WORKER_GTID(gtid) (0 != __kmp_tid_from_gtid((gtid)))
#define KMP_INITIAL_GTID(gtid) (0 == (gtid))
#define TRUE (!FALSE)
#define KMP_INIT_WAIT 64U
#define KMP_NEXT_WAIT 32U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#elif KMP_OS_DRAGONFLY
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
typedef struct kmp_cpuid {
typedef struct kmp_cpuinfo_flags_t {
  unsigned hybrid : 1;
  unsigned reserved : 29;
} kmp_cpuinfo_flags_t;
typedef struct kmp_cpuinfo {
  kmp_cpuinfo_flags_t flags;
  kmp_uint64 frequency;
  char name[3 * sizeof(kmp_cpuid_t)];
extern void __kmp_query_cpuid(kmp_cpuinfo_t *p);
static inline void __kmp_x86_cpuid(int leaf, int subleaf, struct kmp_cpuid *p) {
  __asm__ __volatile__("cpuid"
                       : "=a"(p->eax), "=b"(p->ebx), "=c"(p->ecx), "=d"(p->edx)
                       : "a"(leaf), "c"(subleaf));
static inline void __kmp_load_x87_fpu_control_word(const kmp_int16 *p) {
  __asm__ __volatile__("fldcw %0" : : "m"(*p));
static inline void __kmp_store_x87_fpu_control_word(kmp_int16 *p) {
  __asm__ __volatile__("fstcw %0" : "=m"(*p));
static inline void __kmp_clear_x87_fpu_status_word() {
  struct x87_fpu_state {
  struct x87_fpu_state fpu_state = {0, 0, 0, 0, 0, 0, 0};
  __asm__ __volatile__("fstenv %0\n\t"
                       "andw $0x7f00, %1\n\t"
                       : "+m"(fpu_state), "+m"(fpu_state.sw));
  __asm__ __volatile__("fnclex");
static inline void __kmp_load_mxcsr(const kmp_uint32 *p) { _mm_setcsr(*p); }
static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = _mm_getcsr(); }
static inline void __kmp_load_mxcsr(const kmp_uint32 *p) {}
static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = 0; }
extern void __kmp_x86_cpuid(int mode, int mode2, struct kmp_cpuid *p);
extern void __kmp_load_x87_fpu_control_word(const kmp_int16 *p);
extern void __kmp_store_x87_fpu_control_word(kmp_int16 *p);
extern void __kmp_clear_x87_fpu_status_word();
static inline void __kmp_load_mxcsr(const kmp_uint32 *p) { _mm_setcsr(*p); }
static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = _mm_getcsr(); }
#define KMP_X86_MXCSR_MASK 0xffffffc0
#if KMP_HAVE_WAITPKG_INTRINSICS
#if KMP_HAVE_IMMINTRIN_H
#include <immintrin.h>
#elif KMP_HAVE_INTRIN_H
KMP_ATTRIBUTE_TARGET_WAITPKG
static inline int __kmp_tpause(uint32_t hint, uint64_t counter) {
#if !KMP_HAVE_WAITPKG_INTRINSICS
  uint32_t timeHi = uint32_t(counter >> 32);
  uint32_t timeLo = uint32_t(counter & 0xffffffff);
  __asm__ volatile("#tpause\n.byte 0x66, 0x0F, 0xAE, 0xF1\n"
                   : "a"(timeLo), "d"(timeHi), "c"(hint)
  return _tpause(hint, counter);
KMP_ATTRIBUTE_TARGET_WAITPKG
static inline void __kmp_umonitor(void *cacheline) {
#if !KMP_HAVE_WAITPKG_INTRINSICS
  __asm__ volatile("# umonitor\n.byte 0xF3, 0x0F, 0xAE, 0x01 "
  _umonitor(cacheline);
KMP_ATTRIBUTE_TARGET_WAITPKG
static inline int __kmp_umwait(uint32_t hint, uint64_t counter) {
#if !KMP_HAVE_WAITPKG_INTRINSICS
  uint32_t timeHi = uint32_t(counter >> 32);
  uint32_t timeLo = uint32_t(counter & 0xffffffff);
  __asm__ volatile("#umwait\n.byte 0xF2, 0x0F, 0xAE, 0xF1\n"
                   : "a"(timeLo), "d"(timeHi), "c"(hint)
  return _umwait(hint, counter);
#include <pmmintrin.h>
__attribute__((target("sse3")))
static inline void
__kmp_mm_monitor(void *cacheline, unsigned extensions, unsigned hints) {
  _mm_monitor(cacheline, extensions, hints);
__attribute__((target("sse3")))
static inline void
__kmp_mm_mwait(unsigned extensions, unsigned hints) {
  _mm_mwait(extensions, hints);
extern void __kmp_x86_pause(void);
static inline void __kmp_x86_pause(void) { _mm_delay_32(300); }
static inline void __kmp_x86_pause(void) { _mm_pause(); }
#define KMP_CPU_PAUSE() __kmp_x86_pause()
#define KMP_PPC64_PRI_LOW() __asm__ volatile("or 1, 1, 1")
#define KMP_PPC64_PRI_MED() __asm__ volatile("or 2, 2, 2")
#define KMP_PPC64_PRI_LOC_MB() __asm__ volatile("" : : : "memory")
#define KMP_CPU_PAUSE() \
  KMP_PPC64_PRI_LOW(); \
  KMP_PPC64_PRI_MED(); \
  KMP_PPC64_PRI_LOC_MB(); \
#define KMP_CPU_PAUSE()
#define KMP_INIT_YIELD(count) \
  { (count) = __kmp_yield_init; }
#define KMP_INIT_BACKOFF(time) \
  { (time) = __kmp_pause_init; }
#define KMP_OVERSUBSCRIBED \
  (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc))
#define KMP_TRY_YIELD \
  ((__kmp_use_yield == 1) || (__kmp_use_yield == 2 && (KMP_OVERSUBSCRIBED)))
#define KMP_TRY_YIELD_OVERSUB \
  ((__kmp_use_yield == 1 || __kmp_use_yield == 2) && (KMP_OVERSUBSCRIBED))
#define KMP_YIELD(cond) \
  if ((cond) && (KMP_TRY_YIELD)) \
#define KMP_YIELD_OVERSUB() \
  if ((KMP_TRY_YIELD_OVERSUB)) \
#define KMP_YIELD_SPIN(count) \
  if (KMP_TRY_YIELD) { \
    (count) = __kmp_yield_next; \
#define KMP_TPAUSE_MAX_MASK ((kmp_uint64)0xFFFF)
#define KMP_YIELD_OVERSUB_ELSE_SPIN(count, time) \
  if (__kmp_tpause_enabled) { \
    if (KMP_OVERSUBSCRIBED) { \
      __kmp_tpause(0, (time)); \
      __kmp_tpause(__kmp_tpause_hint, (time)); \
    (time) = (time << 1 | 1) & KMP_TPAUSE_MAX_MASK; \
    if ((KMP_TRY_YIELD_OVERSUB)) { \
    } else if (__kmp_use_yield == 1) { \
      (count) = __kmp_yield_next; \
#define KMP_YIELD_OVERSUB_ELSE_SPIN(count, time) \
  if ((KMP_TRY_YIELD_OVERSUB)) \
  else if (__kmp_use_yield == 1) { \
    (count) = __kmp_yield_next; \
  ct_ordered_in_parallel,
#define IS_CONS_TYPE_ORDERED(ct) ((ct) == ct_pdo_ordered)
  enum cons_type type;
  int p_top, w_top, s_top;
  int stack_size, stack_top;
  struct cons_data *stack_data;
struct kmp_region_info {
  int offset[KMP_MAX_FIELDS];
  int length[KMP_MAX_FIELDS];
typedef HANDLE kmp_thread_t;
typedef DWORD kmp_key_t;
typedef pthread_t kmp_thread_t;
typedef pthread_key_t kmp_key_t;
extern kmp_key_t __kmp_gtid_threadprivate_key;
typedef struct kmp_sys_info {
typedef int kmp_itt_mark_t;
#define KMP_ITT_DEBUG 0
typedef kmp_int32 kmp_critical_name[8];
typedef void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid, ...);
typedef void (*kmpc_micro_bound)(kmp_int32 *bound_tid, kmp_int32 *bound_nth,
typedef void *(*kmpc_ctor)(void *);
typedef void *(*kmpc_cctor)(void *, void *);
typedef void *(*kmpc_ctor_vec)(void *, size_t);
typedef void *(*kmpc_cctor_vec)(void *, void *,
typedef struct kmp_cached_addr {
  void ***compiler_cache;
  struct kmp_cached_addr *next;
struct private_data {
  struct private_data *next;
struct private_common {
  struct private_common *next;
  struct private_common *link;
struct shared_common {
  struct shared_common *next;
  struct private_data *pod_init;
#define KMP_HASH_TABLE_LOG2 9
#define KMP_HASH_TABLE_SIZE \
  (1 << KMP_HASH_TABLE_LOG2)
#define KMP_HASH_SHIFT 3
#define KMP_HASH(x) \
  ((((kmp_uintptr_t)x) >> KMP_HASH_SHIFT) & (KMP_HASH_TABLE_SIZE - 1))
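// Illustrative worked example (added; not in the original header): with
// KMP_HASH_TABLE_LOG2 == 9 the table has 512 buckets; KMP_HASH drops the low
// 3 alignment bits and masks to 9 bits, so
// KMP_HASH((void *)0x1238) == (0x1238 >> 3) & 511 == 0x247 & 0x1FF == 0x47.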
struct common_table {
  struct private_common *data[KMP_HASH_TABLE_SIZE];
struct shared_table {
  struct shared_common *data[KMP_HASH_TABLE_SIZE];
#if KMP_USE_HIER_SCHED
typedef struct kmp_hier_private_bdata_t {
  kmp_int32 num_active;
  kmp_uint64 wait_val[2];
} kmp_hier_private_bdata_t;
typedef struct kmp_sched_flags {
  unsigned ordered : 1;
  unsigned nomerge : 1;
  unsigned contains_last : 1;
  unsigned use_hier : 1;
  unsigned use_hybrid : 1;
  unsigned unused : 27;
KMP_BUILD_ASSERT(sizeof(kmp_sched_flags_t) == 4);
#if KMP_STATIC_STEAL_ENABLED
typedef struct KMP_ALIGN_CACHE dispatch_private_info32 {
  kmp_lock_t *steal_lock;
  kmp_uint32 ordered_lower;
  kmp_uint32 ordered_upper;
  struct KMP_ALIGN(32) {
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
  kmp_uint32 num_procs_with_pcore;
  kmp_int32 first_thread_with_ecore;
  kmp_int32 last_upper;
} dispatch_private_info32_t;
#if CACHE_LINE <= 128
KMP_BUILD_ASSERT(sizeof(dispatch_private_info32_t) <= 128);
typedef struct KMP_ALIGN_CACHE dispatch_private_info64 {
  kmp_lock_t *steal_lock;
  kmp_uint64 ordered_lower;
  kmp_uint64 ordered_upper;
  struct KMP_ALIGN(32) {
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
  kmp_uint64 num_procs_with_pcore;
  kmp_int64 first_thread_with_ecore;
  kmp_int64 last_upper;
} dispatch_private_info64_t;
#if CACHE_LINE <= 128
KMP_BUILD_ASSERT(sizeof(dispatch_private_info64_t) <= 128);
typedef struct KMP_ALIGN_CACHE dispatch_private_info32 {
  kmp_uint32 ordered_lower;
  kmp_uint32 ordered_upper;
  kmp_int32 last_upper;
} dispatch_private_info32_t;
typedef struct KMP_ALIGN_CACHE dispatch_private_info64 {
  kmp_uint64 ordered_lower;
  kmp_uint64 ordered_upper;
  kmp_int64 last_upper;
} dispatch_private_info64_t;
typedef struct KMP_ALIGN_CACHE dispatch_private_info {
  union private_info {
    dispatch_private_info32_t p32;
    dispatch_private_info64_t p64;
  kmp_sched_flags_t flags;
  std::atomic<kmp_uint32> steal_flag;
  kmp_int32 ordered_bumped;
  struct dispatch_private_info *next;
  kmp_int32 type_size;
#if KMP_USE_HIER_SCHED
  enum cons_type pushed_ws;
} dispatch_private_info_t;
typedef struct dispatch_shared_info32 {
  volatile kmp_uint32 iteration;
  volatile kmp_int32 num_done;
  volatile kmp_uint32 ordered_iteration;
  kmp_int32 ordered_dummy[KMP_MAX_ORDERED - 1];
} dispatch_shared_info32_t;
typedef struct dispatch_shared_info64 {
  volatile kmp_uint64 iteration;
  volatile kmp_int64 num_done;
  volatile kmp_uint64 ordered_iteration;
  kmp_int64 ordered_dummy[KMP_MAX_ORDERED - 3];
} dispatch_shared_info64_t;
typedef struct dispatch_shared_info {
  dispatch_shared_info32_t s32;
  dispatch_shared_info64_t s64;
  volatile kmp_uint32 buffer_index;
  volatile kmp_int32 doacross_buf_idx;
  volatile kmp_uint32 *doacross_flags;
  kmp_int32 doacross_num_done;
#if KMP_USE_HIER_SCHED
#if KMP_HWLOC_ENABLED
} dispatch_shared_info_t;
typedef struct kmp_disp {
  void (*th_deo_fcn)(int *gtid, int *cid, ident_t *);
  void (*th_dxo_fcn)(int *gtid, int *cid, ident_t *);
  dispatch_shared_info_t *th_dispatch_sh_current;
  dispatch_private_info_t *th_dispatch_pr_current;
  dispatch_private_info_t *th_disp_buffer;
  kmp_uint32 th_disp_index;
  kmp_int32 th_doacross_buf_idx;
  volatile kmp_uint32 *th_doacross_flags;
  kmp_int64 *th_doacross_info;
#if KMP_USE_INTERNODE_ALIGNMENT
  char more_padding[INTERNODE_CACHE_LINE];
#define KMP_INIT_BARRIER_STATE 0
#define KMP_BARRIER_SLEEP_BIT 0
#define KMP_BARRIER_UNUSED_BIT 1
#define KMP_BARRIER_BUMP_BIT 2
#define KMP_BARRIER_SLEEP_STATE (1 << KMP_BARRIER_SLEEP_BIT)
#define KMP_BARRIER_UNUSED_STATE (1 << KMP_BARRIER_UNUSED_BIT)
#define KMP_BARRIER_STATE_BUMP (1 << KMP_BARRIER_BUMP_BIT)
#if (KMP_BARRIER_SLEEP_BIT >= KMP_BARRIER_BUMP_BIT)
#error "Barrier sleep bit must be smaller than barrier bump bit"
#if (KMP_BARRIER_UNUSED_BIT >= KMP_BARRIER_BUMP_BIT)
#error "Barrier unused bit must be smaller than barrier bump bit"
#define KMP_BARRIER_NOT_WAITING 0
#define KMP_BARRIER_OWN_FLAG \
#define KMP_BARRIER_PARENT_FLAG \
#define KMP_BARRIER_SWITCH_TO_OWN_FLAG \
#define KMP_BARRIER_SWITCHING \
#define KMP_NOT_SAFE_TO_REAP \
#define KMP_SAFE_TO_REAP 1
  bs_plain_barrier = 0,
  bs_forkjoin_barrier,
#if KMP_FAST_REDUCTION_BARRIER
  bs_reduction_barrier,
#if !KMP_FAST_REDUCTION_BARRIER
#define bs_reduction_barrier bs_plain_barrier
typedef enum kmp_bar_pat {
  bp_hierarchical_bar = 3,
#define KMP_BARRIER_ICV_PUSH 1
typedef struct kmp_internal_control {
  int serial_nesting_level;
  int task_thread_limit;
  int max_active_levels;
  kmp_proc_bind_t proc_bind;
  kmp_int32 default_device;
  struct kmp_internal_control *next;
} kmp_internal_control_t;
static inline void copy_icvs(kmp_internal_control_t *dst,
                             kmp_internal_control_t *src) {
typedef struct KMP_ALIGN_CACHE kmp_bstate {
  kmp_internal_control_t th_fixed_icvs;
  volatile kmp_uint64 b_go;
  KMP_ALIGN_CACHE volatile kmp_uint64
  kmp_uint32 *skip_per_level;
  kmp_uint32 my_level;
  kmp_int32 parent_tid;
  struct kmp_bstate *parent_bar;
  kmp_uint64 leaf_state;
  kmp_uint8 base_leaf_kids;
  kmp_uint8 leaf_kids;
  kmp_uint8 wait_flag;
  kmp_uint8 use_oncore_barrier;
  KMP_ALIGN_CACHE kmp_uint b_worker_arrived;
union KMP_ALIGN_CACHE kmp_barrier_union {
  char b_pad[KMP_PAD(kmp_bstate_t, CACHE_LINE)];
typedef union kmp_barrier_union kmp_balign_t;
union KMP_ALIGN_CACHE kmp_barrier_team_union {
  char b_pad[CACHE_LINE];
  kmp_uint64 b_arrived;
  kmp_uint b_master_arrived;
  kmp_uint b_team_arrived;
typedef union kmp_barrier_team_union kmp_balign_team_t;
typedef struct kmp_win32_mutex {
  CRITICAL_SECTION cs;
typedef struct kmp_win32_cond {
  kmp_win32_mutex_t waiters_count_lock_;
  int wait_generation_count_;
union KMP_ALIGN_CACHE kmp_cond_union {
  char c_pad[CACHE_LINE];
  pthread_cond_t c_cond;
typedef union kmp_cond_union kmp_cond_align_t;
union KMP_ALIGN_CACHE kmp_mutex_union {
  char m_pad[CACHE_LINE];
  pthread_mutex_t m_mutex;
typedef union kmp_mutex_union kmp_mutex_align_t;
typedef struct kmp_desc_base {
  size_t ds_stacksize;
  kmp_thread_t ds_thread;
  volatile int ds_tid;
  volatile int ds_alive;
typedef union KMP_ALIGN_CACHE kmp_desc {
  char ds_pad[KMP_PAD(kmp_desc_base_t, CACHE_LINE)];
typedef struct kmp_local {
  volatile int this_construct;
#if !USE_CMP_XCHG_FOR_BGET
#ifdef USE_QUEUING_LOCK_FOR_BGET
  kmp_lock_t bget_lock;
  kmp_bootstrap_lock_t bget_lock;
  PACKED_REDUCTION_METHOD_T
      packed_reduction_method;
#define KMP_CHECK_UPDATE(a, b) \
#define KMP_CHECK_UPDATE_SYNC(a, b) \
  TCW_SYNC_PTR((a), (b))
#define get__blocktime(xteam, xtid) \
  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime)
#define get__bt_set(xteam, xtid) \
  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set)
#define get__bt_intervals(xteam, xtid) \
  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals)
#define get__dynamic_2(xteam, xtid) \
  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.dynamic)
#define get__nproc_2(xteam, xtid) \
  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.nproc)
#define get__sched_2(xteam, xtid) \
  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.sched)
#define set__blocktime_team(xteam, xtid, xval) \
  (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime) = \
#define set__bt_intervals_team(xteam, xtid, xval) \
  (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals) = \
#define set__bt_set_team(xteam, xtid, xval) \
  (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set) = (xval))
#define set__dynamic(xthread, xval) \
  (((xthread)->th.th_current_task->td_icvs.dynamic) = (xval))
#define get__dynamic(xthread) \
  (((xthread)->th.th_current_task->td_icvs.dynamic) ? (FTN_TRUE) : (FTN_FALSE))
#define set__nproc(xthread, xval) \
  (((xthread)->th.th_current_task->td_icvs.nproc) = (xval))
#define set__thread_limit(xthread, xval) \
  (((xthread)->th.th_current_task->td_icvs.thread_limit) = (xval))
#define set__max_active_levels(xthread, xval) \
  (((xthread)->th.th_current_task->td_icvs.max_active_levels) = (xval))
#define get__max_active_levels(xthread) \
  ((xthread)->th.th_current_task->td_icvs.max_active_levels)
#define set__sched(xthread, xval) \
  (((xthread)->th.th_current_task->td_icvs.sched) = (xval))
#define set__proc_bind(xthread, xval) \
  (((xthread)->th.th_current_task->td_icvs.proc_bind) = (xval))
#define get__proc_bind(xthread) \
  ((xthread)->th.th_current_task->td_icvs.proc_bind)
typedef enum kmp_tasking_mode {
  tskm_immediate_exec = 0,
  tskm_extra_barrier = 1,
  tskm_task_teams = 2,
} kmp_tasking_mode_t;
extern kmp_tasking_mode_t
extern int __kmp_task_stealing_constraint;
extern int __kmp_enable_task_throttling;
extern kmp_int32 __kmp_default_device;
extern kmp_int32 __kmp_max_task_priority;
extern kmp_uint64 __kmp_taskloop_min_tasks;
#define KMP_TASK_TO_TASKDATA(task) (((kmp_taskdata_t *)task) - 1)
#define KMP_TASKDATA_TO_TASK(taskdata) (kmp_task_t *)(taskdata + 1)
#define KMP_TASKING_ENABLED(task_team) \
  (TRUE == TCR_SYNC_4((task_team)->tt.tt_found_tasks))
typedef kmp_int32 (*kmp_routine_entry_t)(kmp_int32, void *);
typedef union kmp_cmplrdata {
typedef struct kmp_task {
  kmp_cmplrdata_t data2;
typedef struct kmp_taskgroup {
  std::atomic<kmp_int32> count;
  std::atomic<kmp_int32>
  struct kmp_taskgroup *parent;
  kmp_int32 reduce_num_data;
  uintptr_t *gomp_data;
typedef union kmp_depnode kmp_depnode_t;
typedef struct kmp_depnode_list kmp_depnode_list_t;
typedef struct kmp_dephash_entry kmp_dephash_entry_t;
#define KMP_DEP_IN 0x1
#define KMP_DEP_OUT 0x2
#define KMP_DEP_INOUT 0x3
#define KMP_DEP_MTX 0x4
#define KMP_DEP_SET 0x8
#define KMP_DEP_ALL 0x80
typedef struct kmp_depend_info {
  kmp_intptr_t base_addr;
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
      unsigned unused : 3;
      unsigned unused : 3;
struct kmp_depnode_list {
  kmp_depnode_t *node;
  kmp_depnode_list_t *next;
#define MAX_MTX_DEPS 4
typedef struct kmp_base_depnode {
  kmp_depnode_list_t *successors;
  kmp_lock_t *mtx_locks[MAX_MTX_DEPS];
  kmp_int32 mtx_num_locks;
#if KMP_SUPPORT_GRAPH_OUTPUT
  std::atomic<kmp_int32> npredecessors;
  std::atomic<kmp_int32> nrefs;
} kmp_base_depnode_t;
union KMP_ALIGN_CACHE kmp_depnode {
  char dn_pad[KMP_PAD(kmp_base_depnode_t, CACHE_LINE)];
  kmp_base_depnode_t dn;
struct kmp_dephash_entry {
  kmp_depnode_t *last_out;
  kmp_depnode_list_t *last_set;
  kmp_depnode_list_t *prev_set;
  kmp_uint8 last_flag;
  kmp_lock_t *mtx_lock;
  kmp_dephash_entry_t *next_in_bucket;
typedef struct kmp_dephash {
  kmp_dephash_entry_t **buckets;
  kmp_depnode_t *last_all;
  kmp_uint32 nelements;
  kmp_uint32 nconflicts;
typedef struct kmp_task_affinity_info {
  kmp_intptr_t base_addr;
  kmp_int32 reserved : 30;
} kmp_task_affinity_info_t;
typedef enum kmp_event_type_t {
  KMP_EVENT_UNINITIALIZED = 0,
  KMP_EVENT_ALLOW_COMPLETION = 1
  kmp_event_type_t type;
  kmp_tas_lock_t lock;
#define INIT_MAPSIZE 50
typedef struct kmp_taskgraph_flags {
  unsigned nowait : 1;
  unsigned re_record : 1;
  unsigned reserved : 30;
} kmp_taskgraph_flags_t;
typedef struct kmp_node_info {
  kmp_int32 *successors;
  kmp_int32 nsuccessors;
  std::atomic<kmp_int32>
      npredecessors_counter;
  kmp_int32 npredecessors;
  kmp_int32 successors_size;
  kmp_taskdata_t *parent_task;
typedef enum kmp_tdg_status {
  KMP_TDG_RECORDING = 1,
typedef struct kmp_tdg_info {
  kmp_taskgraph_flags_t tdg_flags;
  kmp_int32 num_roots;
  kmp_int32 *root_tasks;
  kmp_node_info_t *record_map;
  kmp_tdg_status_t tdg_status =
  std::atomic<kmp_int32> num_tasks;
  kmp_bootstrap_lock_t
  void *rec_taskred_data;
  kmp_int32 rec_num_taskred;
extern int __kmp_tdg_dot;
extern kmp_int32 __kmp_max_tdgs;
extern kmp_tdg_info_t **__kmp_global_tdgs;
extern kmp_int32 __kmp_curr_tdg_idx;
extern kmp_int32 __kmp_successors_size;
extern std::atomic<kmp_int32> __kmp_tdg_task_id;
extern kmp_int32 __kmp_num_tdg;
typedef struct kmp_tasking_flags {
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
  unsigned reserved31 : 4;
  unsigned reserved31 : 5;
  unsigned hidden_helper : 1;
  unsigned target : 1;
  unsigned native : 1;
  unsigned complete : 1;
  unsigned executing : 1;
  unsigned started : 1;
  unsigned team_serial : 1;
  unsigned tasking_ser : 1;
  unsigned task_serial : 1;
  unsigned tasktype : 1;
  unsigned reserved : 7;
  unsigned transparent : 1;
  unsigned free_agent_eligible : 1;
  unsigned detachable : 1;
  unsigned priority_specified : 1;
  unsigned destructors_thunk : 1;
  unsigned merged_if0 : 1;
  unsigned tiedness : 1;
  unsigned tiedness : 1;
  unsigned merged_if0 : 1;
  unsigned destructors_thunk : 1;
  unsigned priority_specified : 1;
  unsigned detachable : 1;
  unsigned free_agent_eligible : 1;
  unsigned transparent : 1;
  unsigned reserved : 7;
  unsigned tasktype : 1;
  unsigned task_serial : 1;
  unsigned tasking_ser : 1;
  unsigned team_serial : 1;
  unsigned started : 1;
  unsigned executing : 1;
  unsigned complete : 1;
  unsigned native : 1;
  unsigned target : 1;
  unsigned hidden_helper : 1;
  unsigned reserved31 : 4;
  unsigned reserved31 : 5;
} kmp_tasking_flags_t;
typedef struct kmp_target_data {
struct kmp_taskdata {
  kmp_int32 td_task_id;
  kmp_tasking_flags_t td_flags;
  kmp_team_t *td_team;
  kmp_info_p *td_alloc_thread;
  kmp_taskdata_t *td_parent;
  std::atomic<kmp_int32> td_untied_count;
  kmp_uint32 td_taskwait_counter;
  kmp_int32 td_taskwait_thread;
  KMP_ALIGN_CACHE kmp_internal_control_t
  KMP_ALIGN_CACHE std::atomic<kmp_int32>
      td_allocated_child_tasks;
  std::atomic<kmp_int32>
      td_incomplete_child_tasks;
  kmp_task_team_t *td_task_team;
  size_t td_size_alloc;
#if defined(KMP_GOMP_COMPAT)
  kmp_int32 td_size_loop_bounds;
  kmp_taskdata_t *td_last_tied;
#if defined(KMP_GOMP_COMPAT)
  void (*td_copy_func)(void *, void *);
  kmp_event_t td_allow_completion_event;
  ompt_task_info_t ompt_task_info;
  bool is_taskgraph = 0;
  kmp_tdg_info_t *tdg;
  kmp_int32 td_tdg_task_id;
  kmp_target_data_t td_target_data;
KMP_BUILD_ASSERT(sizeof(kmp_taskdata_t) % sizeof(void *) == 0);
typedef struct kmp_base_thread_data {
  kmp_bootstrap_lock_t td_deque_lock;
  kmp_int32 td_deque_size;
  kmp_uint32 td_deque_head;
  kmp_uint32 td_deque_tail;
  kmp_int32 td_deque_ntasks;
  kmp_int32 td_deque_last_stolen;
} kmp_base_thread_data_t;
#define TASK_DEQUE_BITS 8
#define INITIAL_TASK_DEQUE_SIZE (1 << TASK_DEQUE_BITS)
#define TASK_DEQUE_SIZE(td) ((td).td_deque_size)
#define TASK_DEQUE_MASK(td) ((td).td_deque_size - 1)
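// Illustrative note (added; not in the original header): deque sizes stay
// powers of two (initially 1 << 8 == 256), so TASK_DEQUE_MASK supports cheap
// wrap-around indexing, e.g. head = (head + 1) & TASK_DEQUE_MASK(td).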
2842typedef union KMP_ALIGN_CACHE kmp_thread_data {
2843 kmp_base_thread_data_t td;
2845 char td_pad[KMP_PAD(kmp_base_thread_data_t, CACHE_LINE)];
2848typedef struct kmp_task_pri {
2849 kmp_thread_data_t td;
2855typedef struct kmp_base_task_team {
2856 kmp_bootstrap_lock_t
2861 kmp_bootstrap_lock_t tt_task_pri_lock;
2862 kmp_task_pri_t *tt_task_pri_list;
2864 kmp_task_team_t *tt_next;
2868 kmp_int32 tt_found_tasks;
2872 kmp_int32 tt_max_threads;
2873 kmp_int32 tt_found_proxy_tasks;
2874 kmp_int32 tt_untied_task_encountered;
2875 std::atomic<kmp_int32> tt_num_task_pri;
2878 kmp_int32 tt_hidden_helper_task_encountered;
2881 std::atomic<kmp_int32> tt_unfinished_threads;
2886} kmp_base_task_team_t;
2888union KMP_ALIGN_CACHE kmp_task_team {
2889 kmp_base_task_team_t tt;
2891 char tt_pad[KMP_PAD(kmp_base_task_team_t, CACHE_LINE)];
2894typedef struct kmp_task_team_list_t {
2895 kmp_task_team_t *task_team;
2896 kmp_task_team_list_t *next;
2897} kmp_task_team_list_t;
2899#if (USE_FAST_MEMORY == 3) || (USE_FAST_MEMORY == 5)
2902typedef struct kmp_free_list {
2903 void *th_free_list_self;
2904 void *th_free_list_sync;
2906 void *th_free_list_other;
2912typedef struct kmp_hot_team_ptr {
2913 kmp_team_p *hot_team;
2914 kmp_int32 hot_team_nth;
2915} kmp_hot_team_ptr_t;
2916typedef struct kmp_teams_size {
2932typedef struct kmp_cg_root {
2933 kmp_info_p *cg_root;
2936 kmp_int32 cg_thread_limit;
2937 kmp_int32 cg_nthreads;
2938 struct kmp_cg_root *up;
2943typedef struct KMP_ALIGN_CACHE kmp_base_info {
2949 kmp_team_p *th_team;
2950 kmp_root_p *th_root;
2951 kmp_info_p *th_next_pool;
2952 kmp_disp_t *th_dispatch;
2958 kmp_info_p *th_team_master;
2959 int th_team_serialized;
2960 microtask_t th_teams_microtask;
2969 int th_team_bt_intervals;
2972 kmp_uint64 th_team_bt_intervals;
2975#if KMP_AFFINITY_SUPPORTED
2976 kmp_affin_mask_t *th_affin_mask;
2977 kmp_affinity_ids_t th_topology_ids;
2978 kmp_affinity_attrs_t th_topology_attrs;
2980 omp_allocator_handle_t th_def_allocator;
2984 int *th_set_nested_nth;
2988 const char *th_nt_msg;
2989 int th_set_nested_nth_sz;
2990 kmp_hot_team_ptr_t *th_hot_teams;
2995#if KMP_AFFINITY_SUPPORTED
2996 int th_current_place;
3002 int th_prev_num_threads;
3004 kmp_uint64 th_bar_arrive_time;
3005 kmp_uint64 th_bar_min_time;
3006 kmp_uint64 th_frame_time;
3008 kmp_local_t th_local;
3009 struct private_common *th_pri_head;
3014 KMP_ALIGN_CACHE kmp_team_p
3018 ompt_thread_info_t ompt_thread_info;
3022 struct common_table *th_pri_common;
3024 volatile kmp_uint32 th_spin_here;
3027 volatile void *th_sleep_loc;
3028 flag_type th_sleep_loc_type;
3035 kmp_task_team_t *th_task_team;
3036 kmp_taskdata_t *th_current_task;
3037 kmp_uint8 th_task_state;
3038 kmp_uint32 th_reap_state;
3043 kmp_uint8 th_active_in_pool;
3045 std::atomic<kmp_uint32> th_used_in_team;
3048 struct cons_header *th_cons;
3049#if KMP_USE_HIER_SCHED
3051 kmp_hier_private_bdata_t *th_hier_bar_data;
3055 KMP_ALIGN_CACHE kmp_balign_t th_bar[bs_last_barrier];
3057 KMP_ALIGN_CACHE
volatile kmp_int32
3060#if (USE_FAST_MEMORY == 3) || (USE_FAST_MEMORY == 5)
3062 kmp_free_list_t th_free_lists[NUM_LISTS];
3067 kmp_win32_cond_t th_suspend_cv;
3068 kmp_win32_mutex_t th_suspend_mx;
3069 std::atomic<int> th_suspend_init;
3072 kmp_cond_align_t th_suspend_cv;
3073 kmp_mutex_align_t th_suspend_mx;
3074 std::atomic<int> th_suspend_init_count;
3078 kmp_itt_mark_t th_itt_mark_single;
3081#if KMP_STATS_ENABLED
3082 kmp_stats_list *th_stats;
3085 std::atomic<bool> th_blocking;
3087 kmp_cg_root_t *th_cg_roots;
3090typedef union KMP_ALIGN_CACHE kmp_info {
3092 char th_pad[KMP_PAD(kmp_base_info_t, CACHE_LINE)];
3098typedef struct kmp_base_data {
3099 volatile kmp_uint32 t_value;
3102typedef union KMP_ALIGN_CACHE kmp_sleep_team {
3104 char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
3108typedef union KMP_ALIGN_CACHE kmp_ordered_team {
3110 char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
3112} kmp_ordered_team_t;
3114typedef int (*launch_t)(
int gtid);
3117#define KMP_MIN_MALLOC_ARGV_ENTRIES 100
3123#if KMP_ARCH_X86 || KMP_ARCH_X86_64
3124#define KMP_INLINE_ARGV_BYTES \
3126 ((3 * KMP_PTR_SKIP + 2 * sizeof(int) + 2 * sizeof(kmp_int8) + \
3127 sizeof(kmp_int16) + sizeof(kmp_uint32)) % \
3130#define KMP_INLINE_ARGV_BYTES \
3131 (2 * CACHE_LINE - ((3 * KMP_PTR_SKIP + 2 * sizeof(int)) % CACHE_LINE))
3133#define KMP_INLINE_ARGV_ENTRIES (int)(KMP_INLINE_ARGV_BYTES / KMP_PTR_SKIP)
3135typedef struct KMP_ALIGN_CACHE kmp_base_team {
3138 KMP_ALIGN_CACHE kmp_ordered_team_t t_ordered;
3139 kmp_balign_team_t t_bar[bs_last_barrier];
3140 std::atomic<int> t_construct;
3141 char pad[
sizeof(kmp_lock_t)];
3144 std::atomic<void *> t_tg_reduce_data[2];
3145 std::atomic<int> t_tg_fini_counter[2];
3149 KMP_ALIGN_CACHE
int t_master_tid;
3150 int t_master_this_cons;
3154 kmp_team_p *t_parent;
3155 kmp_team_p *t_next_pool;
3156 kmp_disp_t *t_dispatch;
3157 kmp_task_team_t *t_task_team[2];
3158 kmp_proc_bind_t t_proc_bind;
3159 int t_primary_task_state;
3161 kmp_uint64 t_region_time;
3166 KMP_ALIGN_CACHE
void **t_argv;
3173 ompt_team_info_t ompt_team_info;
3174 ompt_lw_taskteam_t *ompt_serialized_team_info;
3177#if KMP_ARCH_X86 || KMP_ARCH_X86_64
3178 kmp_int8 t_fp_control_saved;
3180 kmp_int16 t_x87_fpu_control_word;
3184 void *t_inline_argv[KMP_INLINE_ARGV_ENTRIES];
3186 KMP_ALIGN_CACHE kmp_info_t **t_threads;
3188 *t_implicit_task_taskdata;
3191 KMP_ALIGN_CACHE
int t_max_argc;
3194 dispatch_shared_info_t *t_disp_buffer;
3197 kmp_r_sched_t t_sched;
3198#if KMP_AFFINITY_SUPPORTED
3202 int t_display_affinity;
3205 omp_allocator_handle_t t_def_allocator;
3208#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
3213 char dummy_padding[1024];
3216 KMP_ALIGN_CACHE kmp_internal_control_t *t_control_stack_top;
3219 std::atomic<kmp_int32> t_cancel_request;
3220 int t_master_active;
3221 void *t_copypriv_data;
3223 std::atomic<kmp_uint32> t_copyin_counter;
3228 distributedBarrier *b;
3229 kmp_nested_nthreads_t *t_nested_nth;
3234KMP_BUILD_ASSERT(
sizeof(kmp_task_team_t *[2]) ==
sizeof(kmp_task_team_list_t));
3235KMP_BUILD_ASSERT(
alignof(kmp_task_team_t *[2]) ==
3236 alignof(kmp_task_team_list_t));
3238union KMP_ALIGN_CACHE kmp_team {
3241 char t_pad[KMP_PAD(kmp_base_team_t, CACHE_LINE)];
typedef union KMP_ALIGN_CACHE kmp_time_global {
  double dt_align; /* use worst case alignment */
  char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
  kmp_base_data_t dt;
} kmp_time_global_t;

typedef struct kmp_base_global {
  /* cache-aligned */
  kmp_time_global_t g_time;

  /* non cache-aligned */
  volatile int g_abort;
  volatile int g_done;

  enum dynamic_mode g_dynamic_mode;
} kmp_base_global_t;

typedef union KMP_ALIGN_CACHE kmp_global {
  kmp_base_global_t g;
  double g_align; /* use worst case alignment */
  char g_pad[KMP_PAD(kmp_base_global_t, CACHE_LINE)];
} kmp_global_t;
typedef struct kmp_base_root {
  volatile int r_active; /* TRUE if some region in a nest has > 1 thread */
  std::atomic<int> r_in_parallel; /* count of active parallel regions per root */
  kmp_team_t *r_root_team;
  kmp_team_t *r_hot_team;
  kmp_info_t *r_uber_thread;
  kmp_lock_t r_begin_lock;
  volatile int r_begin;
#if KMP_AFFINITY_SUPPORTED
  int r_affinity_assigned; // whether this root initialized affinity
#endif // KMP_AFFINITY_SUPPORTED
} kmp_base_root_t;

typedef union KMP_ALIGN_CACHE kmp_root {
  kmp_base_root_t r;
  double r_align; /* use worst case alignment */
  char r_pad[KMP_PAD(kmp_base_root_t, CACHE_LINE)];
} kmp_root_t;

struct fortran_inx_info {
  kmp_int32 data;
};

// This list type exists to hold old __kmp_threads arrays so that
// old references to them may complete while reallocation takes place when
// expanding the array. The items in this list are kept alive until library
// shutdown.
typedef struct kmp_old_threads_list_t {
  kmp_info_t **threads;
  struct kmp_old_threads_list_t *next;
} kmp_old_threads_list_t;
/* ------------------------------------------------------------------------ */

extern int __kmp_settings;
extern int __kmp_duplicate_library_ok;
extern int __kmp_forkjoin_frames;
extern int __kmp_forkjoin_frames_mode;
extern PACKED_REDUCTION_METHOD_T __kmp_force_reduction_method;
extern int __kmp_determ_red;

/* Debug flags */
extern int kmp_a_debug;
extern int kmp_b_debug;
extern int kmp_c_debug;
extern int kmp_d_debug;
extern int kmp_e_debug;
extern int kmp_f_debug;

/* For debug information logging using a rotating buffer */
#define KMP_DEBUG_BUF_LINES_INIT 512
#define KMP_DEBUG_BUF_LINES_MIN 1

#define KMP_DEBUG_BUF_CHARS_INIT 128
#define KMP_DEBUG_BUF_CHARS_MIN 2

extern int __kmp_debug_buf_lines; /* how many lines of debug are stored in buffer */
extern int __kmp_debug_buf_chars; /* how many characters are allowed per line */
extern int __kmp_debug_buf_atomic; /* TRUE means use atomic update of buffer entry pointer */

extern char *__kmp_debug_buffer; /* the debug buffer itself */
extern std::atomic<int> __kmp_debug_count; /* number of lines printed so far */
extern int __kmp_debug_buf_warn_chars; /* char increase recommended in warnings */
/* end rotating debug buffer */

extern int __kmp_par_range; /* +1 => only go parallel for constructs in range */

#define KMP_PAR_RANGE_ROUTINE_LEN 1024
extern char __kmp_par_range_routine[KMP_PAR_RANGE_ROUTINE_LEN];
#define KMP_PAR_RANGE_FILENAME_LEN 1024
extern char __kmp_par_range_filename[KMP_PAR_RANGE_FILENAME_LEN];
extern int __kmp_par_range_lb;
extern int __kmp_par_range_ub;
extern int __kmp_storage_map_verbose; /* TRUE means storage map includes placement info */
extern int __kmp_storage_map_verbose_specified;

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
extern kmp_cpuinfo_t __kmp_cpuinfo;
static inline bool __kmp_is_hybrid_cpu() { return __kmp_cpuinfo.flags.hybrid; }
#elif KMP_OS_DARWIN && KMP_ARCH_AARCH64
// Apple silicon is hybrid.
static inline bool __kmp_is_hybrid_cpu() { return true; }
#else
static inline bool __kmp_is_hybrid_cpu() { return false; }
#endif
extern volatile int __kmp_init_serial;
extern volatile int __kmp_init_gtid;
extern volatile int __kmp_init_common;
extern volatile int __kmp_need_register_serial;
extern volatile int __kmp_init_middle;
extern volatile int __kmp_init_parallel;
extern volatile int __kmp_init_monitor;
extern volatile int __kmp_init_user_locks;
extern volatile int __kmp_init_hidden_helper_threads;
extern int __kmp_init_counter;
extern int __kmp_root_counter;
extern int __kmp_version;

/* list of addresses of allocated caches for commons */
extern kmp_cached_addr_t *__kmp_threadpriv_cache_list;

/* Barrier algorithm types and options */
extern kmp_uint32 __kmp_barrier_gather_bb_dflt;
extern kmp_uint32 __kmp_barrier_release_bb_dflt;
extern kmp_bar_pat_e __kmp_barrier_gather_pat_dflt;
extern kmp_bar_pat_e __kmp_barrier_release_pat_dflt;
extern kmp_uint32 __kmp_barrier_gather_branch_bits[bs_last_barrier];
extern kmp_uint32 __kmp_barrier_release_branch_bits[bs_last_barrier];
extern kmp_bar_pat_e __kmp_barrier_gather_pattern[bs_last_barrier];
extern kmp_bar_pat_e __kmp_barrier_release_pattern[bs_last_barrier];
extern char const *__kmp_barrier_branch_bit_env_name[bs_last_barrier];
extern char const *__kmp_barrier_pattern_env_name[bs_last_barrier];
extern char const *__kmp_barrier_type_name[bs_last_barrier];
extern char const *__kmp_barrier_pattern_name[bp_last_bar];
/* Global locks */
extern kmp_bootstrap_lock_t __kmp_initz_lock; /* control initialization */
extern kmp_bootstrap_lock_t __kmp_forkjoin_lock; /* control fork/join access */
extern kmp_bootstrap_lock_t __kmp_task_team_lock;
extern kmp_bootstrap_lock_t
    __kmp_exit_lock; /* exit() is not always thread-safe */
extern kmp_bootstrap_lock_t
    __kmp_monitor_lock; /* control monitor thread creation */
extern kmp_bootstrap_lock_t
    __kmp_tp_cached_lock; /* used to allow the threadprivate cache and
                             __kmp_threads expansion to co-exist */
extern kmp_lock_t __kmp_global_lock; /* control OS/global access */

extern enum library_type __kmp_library;

extern int __kmp_chunk; /* default runtime chunk size */
extern int __kmp_force_monotonic; /* whether monotonic scheduling is forced */
extern size_t __kmp_stksize; /* stack size per thread */
extern size_t __kmp_monitor_stksize; /* stack size for monitor thread */
extern size_t __kmp_stkoffset; /* stack offset per thread */
extern int __kmp_stkpadding; /* how many cache lines to pad kmp_info stack */

extern size_t __kmp_malloc_pool_incr; /* incremental size of pool for kmp_malloc() */
extern int __kmp_env_stksize; /* was KMP_STACKSIZE specified? */
extern int __kmp_env_blocktime; /* was KMP_BLOCKTIME specified? */
extern int __kmp_env_checks; /* was KMP_CHECKS specified? */
extern int __kmp_env_consistency_check; /* was KMP_CONSISTENCY_CHECK specified? */
extern int __kmp_generate_warnings; /* should we issue warnings? */
extern int __kmp_reserve_warn; /* have we issued reserve_threads warning? */

#ifdef DEBUG_SUSPEND
extern int __kmp_suspend_count; /* count inside __kmp_suspend_template() */
#endif

extern kmp_int32 __kmp_use_yield;
extern kmp_int32 __kmp_use_yield_exp_set;
extern kmp_uint32 __kmp_yield_init;
extern kmp_uint32 __kmp_yield_next;
extern kmp_uint64 __kmp_pause_init;

/* ------------------------------------------------------------------------- */
extern int __kmp_allThreadsSpecified;

extern size_t __kmp_align_alloc;
/* following data protected by initialization routines */
extern int __kmp_xproc; /* number of processors in the system */
extern int __kmp_avail_proc; /* number of processors available to the process */
extern size_t __kmp_sys_min_stksize; /* system-defined minimum stack size */
extern int __kmp_sys_max_nth; /* system-imposed maximum number of threads */
// maximum total number of concurrently-existing threads on device
extern int __kmp_max_nth;
// maximum total number of concurrently-existing threads in a contention group
extern int __kmp_cg_max_nth;
extern int __kmp_task_max_nth; // max threads used in a task
extern int __kmp_teams_max_nth; // max threads used in a teams construct
extern int __kmp_threads_capacity; /* capacity of __kmp_threads array */
extern int __kmp_dflt_team_nth; /* default number of threads in a parallel region */
extern int __kmp_dflt_team_nth_ub; /* upper bound on "dflt" before middle init */
extern int __kmp_tp_capacity; /* capacity of __kmp_threads if threadprivate is used */
extern int __kmp_tp_cached; /* whether a threadprivate cache has been created */
extern int __kmp_dflt_blocktime; /* time to wait before blocking */
extern char __kmp_blocktime_units; /* 'm' (milliseconds) or 'u' (microseconds) */
extern bool __kmp_wpolicy_passive; /* passive waiting in effect? */
// Convert raw blocktime from milliseconds to microseconds if needed.
static inline void __kmp_aux_convert_blocktime(int *bt) {
  if (__kmp_blocktime_units == 'm') {
    if (*bt > INT_MAX / 1000) {
      *bt = INT_MAX / 1000;
      KMP_INFORM(MaxValueUsing, "kmp_set_blocktime(ms)", bt);
    }
    *bt = *bt * 1000;
  }
}

extern int __kmp_monitor_wakeups; /* number of times monitor wakes up per second */
extern int __kmp_bt_intervals; /* number of monitor timestamp intervals before blocking */
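/* Worked example (illustrative, not part of the runtime): with
   __kmp_blocktime_units == 'm', a requested blocktime of 200 ms becomes
   200 * 1000 = 200000 us after __kmp_aux_convert_blocktime(). A request
   larger than INT_MAX / 1000 ms is first clamped so the multiplication
   cannot overflow, and KMP_INFORM reports the clamped value. */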
#ifdef KMP_ADJUST_BLOCKTIME
extern int __kmp_zero_bt; /* whether blocktime has been forced to zero */
#endif /* KMP_ADJUST_BLOCKTIME */
#ifdef KMP_DFLT_NTH_CORES
extern int __kmp_ncores; /* number of distinct physical cores */
#endif
extern int __kmp_abort_delay; /* delay (in milliseconds) before abort */

extern int __kmp_need_register_atfork_specified;
extern int __kmp_need_register_atfork; /* at-fork handler needed? */
extern int __kmp_gtid_mode; /* method of getting gtid */
extern int __kmp_adjust_gtid_mode; /* adjust gtid mode automatically? */
#ifdef KMP_TDATA_GTID
extern KMP_THREAD_LOCAL int __kmp_gtid;
#endif
extern int __kmp_tls_gtid_min; /* #threads below which use TLS for gtid */
extern int __kmp_foreign_tp; /* threadprivate supported in foreign threads? */
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
extern int __kmp_inherit_fp_control; /* copy FP control regs to workers? */
extern kmp_int16 __kmp_init_x87_fpu_control_word; /* init x87 FPU control reg */
extern kmp_uint32 __kmp_init_mxcsr; /* init MXCSR register */
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

// max_active_levels for nested parallelism enabled by default
extern int __kmp_dflt_max_active_levels;
// Indicates whether __kmp_dflt_max_active_levels was explicitly set
extern bool __kmp_dflt_max_active_levels_set;
extern int __kmp_dispatch_num_buffers; /* max concurrent dynamic loops per team */
extern int __kmp_hot_teams_mode;
extern int __kmp_hot_teams_max_level;

#if KMP_MIC_SUPPORTED
extern enum mic_type __kmp_mic_type;
#endif

#ifdef USE_LOAD_BALANCE
extern double __kmp_load_balance_interval; /* load-balance algorithm interval */
#endif /* USE_LOAD_BALANCE */

#if KMP_USE_ADAPTIVE_LOCKS

// Parameters for the speculative lock backoff system.
struct kmp_adaptive_backoff_params_t {
  // Number of soft retries before it counts as a hard retry.
  kmp_uint32 max_soft_retries;
  // Badness is a bit mask: 0, 1, 3, 7, 15,...; on each hard failure we move
  // one bit to the right.
  kmp_uint32 max_badness;
};

extern kmp_adaptive_backoff_params_t __kmp_adaptive_backoff_params;

#if KMP_DEBUG_ADAPTIVE_LOCKS
extern const char *__kmp_speculative_statsfile;
#endif

#endif // KMP_USE_ADAPTIVE_LOCKS

extern int __kmp_display_env; /* TRUE or FALSE */
extern int __kmp_display_env_verbose; /* TRUE if OMP_DISPLAY_ENV=VERBOSE */
extern int __kmp_omp_cancellation; /* TRUE or FALSE */
extern int __kmp_nteams;
extern int __kmp_teams_thread_limit;

/* ------------------------------------------------------------------------- */

/* the following are protected by the fork/join lock */
/* write: lock  read: anytime */
extern kmp_info_t **__kmp_threads; /* descriptors for the threads */
/* Holds old arrays of __kmp_threads until library shutdown */
extern kmp_old_threads_list_t *__kmp_old_threads_list;
/* read/write: lock */
extern volatile kmp_team_t *__kmp_team_pool;
extern volatile kmp_info_t *__kmp_thread_pool;
extern kmp_info_t *__kmp_thread_pool_insert_pt;

// total number of threads reachable from some root thread, including all roots
extern volatile int __kmp_nth;
/* as above, plus those in the thread pool */
extern volatile int __kmp_all_nth;
extern std::atomic<int> __kmp_thread_pool_active_nth;

extern kmp_root_t **__kmp_root; /* root of thread hierarchy */
/* end data protected by fork/join lock */

/* ------------------------------------------------------------------------- */

#define __kmp_get_gtid() __kmp_get_global_thread_id()
#define __kmp_entry_gtid() __kmp_get_global_thread_id_reg()
#define __kmp_get_tid() (__kmp_tid_from_gtid(__kmp_get_gtid()))
#define __kmp_get_team() (__kmp_threads[(__kmp_get_gtid())]->th.th_team)
#define __kmp_get_thread() (__kmp_thread_from_gtid(__kmp_get_gtid()))

// Returns the current thread's number of threads in the team.
#define __kmp_get_team_num_threads(gtid)                                       \
  (__kmp_threads[(gtid)]->th.th_team->t.t_nproc)
static inline bool KMP_UBER_GTID(int gtid) {
  KMP_DEBUG_ASSERT(gtid >= KMP_GTID_MIN);
  KMP_DEBUG_ASSERT(gtid < __kmp_threads_capacity);
  return (gtid >= 0 && __kmp_root[gtid] && __kmp_threads[gtid] &&
          __kmp_threads[gtid] == __kmp_root[gtid]->r.r_uber_thread);
}

static inline int __kmp_tid_from_gtid(int gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);
  return __kmp_threads[gtid]->th.th_info.ds.ds_tid;
}

static inline int __kmp_gtid_from_tid(int tid, const kmp_team_t *team) {
  KMP_DEBUG_ASSERT(tid >= 0 && team);
  return team->t.t_threads[tid]->th.th_info.ds.ds_gtid;
}

static inline int __kmp_gtid_from_thread(const kmp_info_t *thr) {
  KMP_DEBUG_ASSERT(thr);
  return thr->th.th_info.ds.ds_gtid;
}

static inline kmp_info_t *__kmp_thread_from_gtid(int gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);
  return __kmp_threads[gtid];
}

static inline kmp_team_t *__kmp_team_from_gtid(int gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);
  return __kmp_threads[gtid]->th.th_team;
}

// Aborts if gtid is out of range.
static inline void __kmp_assert_valid_gtid(kmp_int32 gtid) {
  if (UNLIKELY(gtid < 0 || gtid >= __kmp_threads_capacity))
    KMP_FATAL(ThreadIdentInvalid);
}
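/* Illustrative sketch (not part of the runtime) of how the accessors above
   relate: a global thread id (gtid) indexes __kmp_threads, while a team-local
   thread id (tid) indexes the owning team's t_threads array, so the two
   conversions are inverses for any registered thread:

     int gtid = __kmp_get_gtid();
     kmp_team_t *team = __kmp_team_from_gtid(gtid);
     int tid = __kmp_tid_from_gtid(gtid);
     KMP_DEBUG_ASSERT(__kmp_gtid_from_tid(tid, team) == gtid);
     KMP_DEBUG_ASSERT(__kmp_thread_from_gtid(gtid) == __kmp_threads[gtid]);
*/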
#if KMP_HAVE_MWAIT || KMP_HAVE_UMWAIT
extern int __kmp_user_level_mwait; // TRUE or FALSE; from KMP_USER_LEVEL_MWAIT
extern int __kmp_umwait_enabled; // runtime check if user-level mwait enabled
extern int __kmp_mwait_enabled; // runtime check if ring3 mwait is enabled
extern int __kmp_mwait_hints; // hints to pass in to mwait
#endif

#if KMP_HAVE_UMWAIT
extern int __kmp_waitpkg_enabled; // runtime check if waitpkg exists
extern int __kmp_tpause_state; // 0 (default), 1=C0.1, 2=C0.2; from KMP_TPAUSE
extern int __kmp_tpause_hint; // 1=C0.1 (default), 0=C0.2; from KMP_TPAUSE
extern int __kmp_tpause_enabled; // 0 (default), 1 (KMP_TPAUSE is non-zero)
#endif

/* ------------------------------------------------------------------------- */

extern kmp_global_t __kmp_global; /* global status */

extern kmp_info_t __kmp_monitor;
// For Debugging Support Library
extern std::atomic<kmp_int32> __kmp_team_counter;
// For Debugging Support Library
extern std::atomic<kmp_int32> __kmp_task_counter;

#if USE_DEBUGGER
#define _KMP_GEN_ID(counter)                                                   \
  (__kmp_debugging ? KMP_ATOMIC_INC(&counter) + 1 : ~0)
#else
#define _KMP_GEN_ID(counter) (~0)
#endif /* USE_DEBUGGER */

#define KMP_GEN_TASK_ID() _KMP_GEN_ID(__kmp_task_counter)
#define KMP_GEN_TEAM_ID() _KMP_GEN_ID(__kmp_team_counter)
/* ------------------------------------------------------------------------ */

extern void __kmp_print_storage_map_gtid(int gtid, void *p1, void *p2,
                                         size_t size, char const *format, ...);

extern void __kmp_serial_initialize(void);
extern void __kmp_middle_initialize(void);
extern void __kmp_parallel_initialize(void);

extern void __kmp_internal_begin(void);
extern void __kmp_internal_end_library(int gtid);
extern void __kmp_internal_end_thread(int gtid);
extern void __kmp_internal_end_atexit(void);
extern void __kmp_internal_end_dtor(void);
extern void __kmp_internal_end_dest(void *);

extern int __kmp_register_root(int initial_thread);
extern void __kmp_unregister_root(int gtid);
extern void __kmp_unregister_library(void);

extern int __kmp_ignore_mppbeg(void);
extern int __kmp_ignore_mppend(void);

extern int __kmp_enter_single(int gtid, ident_t *id_ref, int push_ws);
extern void __kmp_exit_single(int gtid);

extern void __kmp_parallel_deo(int *gtid_ref, int *cid_ref, ident_t *loc_ref);
extern void __kmp_parallel_dxo(int *gtid_ref, int *cid_ref, ident_t *loc_ref);

#ifdef USE_LOAD_BALANCE
extern int __kmp_get_load_balance(int);
#endif

extern int __kmp_get_global_thread_id(void);
extern int __kmp_get_global_thread_id_reg(void);
extern void __kmp_exit_thread(int exit_status);
extern void __kmp_abort(char const *format, ...);
extern void __kmp_abort_thread(void);
KMP_NORETURN extern void __kmp_abort_process(void);
extern void __kmp_warn(char const *format, ...);

extern void __kmp_set_num_threads(int new_nth, int gtid);

extern bool __kmp_detect_shm();
extern bool __kmp_detect_tmp();
// Returns the current thread; the thread must already be registered.
static inline kmp_info_t *__kmp_entry_thread() {
  int gtid = __kmp_entry_gtid();
  return __kmp_threads[gtid];
}

extern void __kmp_set_max_active_levels(int gtid, int new_max_active_levels);
extern int __kmp_get_max_active_levels(int gtid);
extern int __kmp_get_ancestor_thread_num(int gtid, int level);
extern int __kmp_get_team_size(int gtid, int level);
extern void __kmp_set_schedule(int gtid, kmp_sched_t new_sched, int chunk);
extern void __kmp_get_schedule(int gtid, kmp_sched_t *sched, int *chunk);

extern unsigned short __kmp_get_random(kmp_info_t *thread);
extern void __kmp_init_random(kmp_info_t *thread);

extern kmp_r_sched_t __kmp_get_schedule_global(void);
extern void __kmp_adjust_num_threads(int new_nproc);
extern void __kmp_check_stksize(size_t *val);
extern void *___kmp_allocate(size_t size KMP_SRC_LOC_DECL);
extern void *___kmp_page_allocate(size_t size KMP_SRC_LOC_DECL);
extern void ___kmp_free(void *ptr KMP_SRC_LOC_DECL);
#define __kmp_allocate(size) ___kmp_allocate((size)KMP_SRC_LOC_CURR)
#define __kmp_page_allocate(size) ___kmp_page_allocate((size)KMP_SRC_LOC_CURR)
#define __kmp_free(ptr) ___kmp_free((ptr)KMP_SRC_LOC_CURR)

#if USE_FAST_MEMORY
extern void *___kmp_fast_allocate(kmp_info_t *this_thr,
                                  size_t size KMP_SRC_LOC_DECL);
extern void ___kmp_fast_free(kmp_info_t *this_thr, void *ptr KMP_SRC_LOC_DECL);
extern void __kmp_free_fast_memory(kmp_info_t *this_thr);
extern void __kmp_initialize_fast_memory(kmp_info_t *this_thr);
#define __kmp_fast_allocate(this_thr, size)                                    \
  ___kmp_fast_allocate((this_thr), (size)KMP_SRC_LOC_CURR)
#define __kmp_fast_free(this_thr, ptr)                                         \
  ___kmp_fast_free((this_thr), (ptr)KMP_SRC_LOC_CURR)
#endif

extern void *___kmp_thread_malloc(kmp_info_t *th, size_t size KMP_SRC_LOC_DECL);
extern void *___kmp_thread_calloc(kmp_info_t *th, size_t nelem,
                                  size_t elsize KMP_SRC_LOC_DECL);
extern void *___kmp_thread_realloc(kmp_info_t *th, void *ptr,
                                   size_t size KMP_SRC_LOC_DECL);
extern void ___kmp_thread_free(kmp_info_t *th, void *ptr KMP_SRC_LOC_DECL);
#define __kmp_thread_malloc(th, size)                                          \
  ___kmp_thread_malloc((th), (size)KMP_SRC_LOC_CURR)
#define __kmp_thread_calloc(th, nelem, elsize)                                 \
  ___kmp_thread_calloc((th), (nelem), (elsize)KMP_SRC_LOC_CURR)
#define __kmp_thread_realloc(th, ptr, size)                                    \
  ___kmp_thread_realloc((th), (ptr), (size)KMP_SRC_LOC_CURR)
#define __kmp_thread_free(th, ptr)                                             \
  ___kmp_thread_free((th), (ptr)KMP_SRC_LOC_CURR)
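/* Illustrative usage sketch (not part of the runtime): the wrapper macros
   splice in the caller's source location via KMP_SRC_LOC_CURR so allocations
   can be traced in debug builds; callers simply pair allocate/free:

     kmp_int32 *buf = (kmp_int32 *)__kmp_allocate(n * sizeof(kmp_int32));
     // ... use buf ...
     __kmp_free(buf);

   The __kmp_thread_* and __kmp_fast_* variants additionally take the owning
   kmp_info_t so the memory is accounted to (and, for the fast pool, cached
   by) that thread. */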
extern void __kmp_push_num_threads(ident_t *loc, int gtid, int num_threads);
extern void __kmp_push_num_threads_list(ident_t *loc, int gtid,
                                        kmp_uint32 list_length,
                                        int *num_threads_list);
extern void __kmp_set_strict_num_threads(ident_t *loc, int gtid, int sev,
                                         const char *msg);

extern void __kmp_push_proc_bind(ident_t *loc, int gtid,
                                 kmp_proc_bind_t proc_bind);
extern void __kmp_push_num_teams(ident_t *loc, int gtid, int num_teams,
                                 int num_threads);
extern void __kmp_push_num_teams_51(ident_t *loc, int gtid, int num_teams_lb,
                                    int num_teams_ub, int num_threads);
extern void __kmp_yield();
extern void __kmpc_dispatch_init_4(ident_t *loc, kmp_int32 gtid,
                                   enum sched_type schedule, kmp_int32 lb,
                                   kmp_int32 ub, kmp_int32 st, kmp_int32 chunk);
extern void __kmpc_dispatch_init_4u(ident_t *loc, kmp_int32 gtid,
                                    enum sched_type schedule, kmp_uint32 lb,
                                    kmp_uint32 ub, kmp_int32 st,
                                    kmp_int32 chunk);
extern void __kmpc_dispatch_init_8(ident_t *loc, kmp_int32 gtid,
                                   enum sched_type schedule, kmp_int64 lb,
                                   kmp_int64 ub, kmp_int64 st, kmp_int64 chunk);
extern void __kmpc_dispatch_init_8u(ident_t *loc, kmp_int32 gtid,
                                    enum sched_type schedule, kmp_uint64 lb,
                                    kmp_uint64 ub, kmp_int64 st,
                                    kmp_int64 chunk);
extern int __kmpc_dispatch_next_4(ident_t *loc, kmp_int32 gtid,
                                  kmp_int32 *p_last, kmp_int32 *p_lb,
                                  kmp_int32 *p_ub, kmp_int32 *p_st);
extern int __kmpc_dispatch_next_4u(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 *p_last, kmp_uint32 *p_lb,
                                   kmp_uint32 *p_ub, kmp_int32 *p_st);
extern int __kmpc_dispatch_next_8(ident_t *loc, kmp_int32 gtid,
                                  kmp_int32 *p_last, kmp_int64 *p_lb,
                                  kmp_int64 *p_ub, kmp_int64 *p_st);
extern int __kmpc_dispatch_next_8u(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 *p_last, kmp_uint64 *p_lb,
                                   kmp_uint64 *p_ub, kmp_int64 *p_st);
extern void __kmpc_dispatch_fini_4(ident_t *loc, kmp_int32 gtid);
extern void __kmpc_dispatch_fini_8(ident_t *loc, kmp_int32 gtid);
extern void __kmpc_dispatch_fini_4u(ident_t *loc, kmp_int32 gtid);
extern void __kmpc_dispatch_fini_8u(ident_t *loc, kmp_int32 gtid);
extern void __kmpc_dispatch_deinit(ident_t *loc, kmp_int32 gtid);
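/* Illustrative sketch (hand-written, not compiler output) of how a
   dynamically scheduled loop drives the dispatch API above; 'loc' and 'gtid'
   are assumed to be supplied by the caller, and body() is a hypothetical
   loop body:

     __kmpc_dispatch_init_4(loc, gtid, kmp_sch_dynamic_chunked,
                            0, n - 1, 1, chunk);
     kmp_int32 lb, ub, st, last;
     while (__kmpc_dispatch_next_4(loc, gtid, &last, &lb, &ub, &st)) {
       for (kmp_int32 i = lb; i <= ub; i += st)
         body(i);
     }

   __kmpc_dispatch_next_* returns nonzero while chunks remain; each worker
   repeats the loop until the iteration space is exhausted. */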
#ifdef KMP_GOMP_COMPAT

extern void __kmp_aux_dispatch_init_4(ident_t *loc, kmp_int32 gtid,
                                      enum sched_type schedule, kmp_int32 lb,
                                      kmp_int32 ub, kmp_int32 st,
                                      kmp_int32 chunk, int push_ws);
extern void __kmp_aux_dispatch_init_4u(ident_t *loc, kmp_int32 gtid,
                                       enum sched_type schedule, kmp_uint32 lb,
                                       kmp_uint32 ub, kmp_int32 st,
                                       kmp_int32 chunk, int push_ws);
extern void __kmp_aux_dispatch_init_8(ident_t *loc, kmp_int32 gtid,
                                      enum sched_type schedule, kmp_int64 lb,
                                      kmp_int64 ub, kmp_int64 st,
                                      kmp_int64 chunk, int push_ws);
extern void __kmp_aux_dispatch_init_8u(ident_t *loc, kmp_int32 gtid,
                                       enum sched_type schedule, kmp_uint64 lb,
                                       kmp_uint64 ub, kmp_int64 st,
                                       kmp_int64 chunk, int push_ws);
extern void __kmp_aux_dispatch_fini_chunk_4(ident_t *loc, kmp_int32 gtid);
extern void __kmp_aux_dispatch_fini_chunk_8(ident_t *loc, kmp_int32 gtid);
extern void __kmp_aux_dispatch_fini_chunk_4u(ident_t *loc, kmp_int32 gtid);
extern void __kmp_aux_dispatch_fini_chunk_8u(ident_t *loc, kmp_int32 gtid);

#endif /* KMP_GOMP_COMPAT */
extern kmp_uint32 __kmp_eq_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_neq_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_lt_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_ge_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_le_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_wait_4(kmp_uint32 volatile *spinner, kmp_uint32 checker,
                               kmp_uint32 (*pred)(kmp_uint32, kmp_uint32),
                               void *obj);
extern void __kmp_wait_4_ptr(void *spinner, kmp_uint32 checker,
                             kmp_uint32 (*pred)(void *, kmp_uint32),
                             void *obj);

extern void __kmp_wait_64(kmp_info_t *this_thr, kmp_flag_64<> *flag,
                          int final_spin
#if USE_ITT_BUILD
                          ,
                          void *itt_sync_obj
#endif
);
extern void __kmp_release_64(kmp_flag_64<> *flag);
extern void __kmp_infinite_loop(void);

extern void __kmp_cleanup(void);

#if KMP_HANDLE_SIGNALS
extern int __kmp_handle_signals;
extern void __kmp_install_signals(int parallel_init);
extern void __kmp_remove_signals(void);
#endif

extern void __kmp_clear_system_time(void);
extern void __kmp_read_system_time(double *delta);

extern void __kmp_check_stack_overlap(kmp_info_t *thr);

extern void __kmp_expand_host_name(char *buffer, size_t size);
extern void __kmp_expand_file_name(char *result, size_t rlen, char *pattern);

#if KMP_ARCH_X86 || KMP_ARCH_X86_64 ||                                         \
    (KMP_OS_WINDOWS && (KMP_ARCH_AARCH64 || KMP_ARCH_ARM))
extern void __kmp_initialize_system_tick(void); /* initialize timer tick value */
#endif

extern void __kmp_runtime_initialize(void); /* machine-specific initialization */
extern void __kmp_runtime_destroy(void);
#if KMP_AFFINITY_SUPPORTED
extern char *__kmp_affinity_print_mask(char *buf, int buf_len,
                                       kmp_affin_mask_t *mask);
extern kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf,
                                                  kmp_affin_mask_t *mask);
extern void __kmp_affinity_initialize(kmp_affinity_t &affinity);
extern void __kmp_affinity_uninitialize(void);
extern void __kmp_affinity_set_init_mask(int gtid, int isa_root);
void __kmp_affinity_bind_init_mask(int gtid);
extern void __kmp_affinity_bind_place(int gtid);
extern void __kmp_affinity_determine_capable(const char *env_var);
extern int __kmp_aux_set_affinity(void **mask);
extern int __kmp_aux_get_affinity(void **mask);
extern int __kmp_aux_get_affinity_max_proc();
extern int __kmp_aux_set_affinity_mask_proc(int proc, void **mask);
extern int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask);
extern int __kmp_aux_get_affinity_mask_proc(int proc, void **mask);
extern void __kmp_balanced_affinity(kmp_info_t *th, int team_size);
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
extern int __kmp_get_first_osid_with_ecore(void);
#endif
#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY ||     \
    KMP_OS_AIX
extern int kmp_set_thread_affinity_mask_initial(void);
#endif
static inline void __kmp_assign_root_init_mask() {
  int gtid = __kmp_entry_gtid();
  kmp_root_t *r = __kmp_threads[gtid]->th.th_root;
  if (r->r.r_uber_thread == __kmp_threads[gtid] && !r->r.r_affinity_assigned) {
    __kmp_affinity_set_init_mask(gtid, /*isa_root=*/TRUE);
    __kmp_affinity_bind_init_mask(gtid);
    r->r.r_affinity_assigned = TRUE;
  }
}
static inline void __kmp_reset_root_init_mask(int gtid) {
  if (!KMP_AFFINITY_CAPABLE())
    return;
  kmp_info_t *th = __kmp_threads[gtid];
  kmp_root_t *r = th->th.th_root;
  if (r->r.r_uber_thread == th && r->r.r_affinity_assigned) {
    __kmp_set_system_affinity(__kmp_affin_origMask, FALSE);
    KMP_CPU_COPY(th->th.th_affin_mask, __kmp_affin_origMask);
    r->r.r_affinity_assigned = FALSE;
  }
}
#else /* KMP_AFFINITY_SUPPORTED */
#define __kmp_assign_root_init_mask() /* Nothing */
static inline void __kmp_reset_root_init_mask(int gtid) {}
#endif /* KMP_AFFINITY_SUPPORTED */
extern size_t __kmp_aux_capture_affinity(int gtid, const char *format,
                                         kmp_str_buf_t *buffer);
extern void __kmp_aux_display_affinity(int gtid, const char *format);

extern void __kmp_cleanup_hierarchy();
extern void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar);
#if KMP_USE_FUTEX
extern int __kmp_futex_determine_capable(void);
#endif

extern void __kmp_gtid_set_specific(int gtid);
extern int __kmp_gtid_get_specific(void);

extern double __kmp_read_cpu_time(void);

extern int __kmp_read_system_info(struct kmp_sys_info *info);

#if KMP_USE_MONITOR
extern void __kmp_create_monitor(kmp_info_t *th);
#endif

extern void *__kmp_launch_thread(kmp_info_t *thr);

extern void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size);

#if KMP_OS_WINDOWS
extern int __kmp_still_running(kmp_info_t *th);
extern int __kmp_is_thread_alive(kmp_info_t *th, DWORD *exit_val);
extern void __kmp_free_handle(kmp_thread_t tHandle);
#endif

#if KMP_USE_MONITOR
extern void __kmp_reap_monitor(kmp_info_t *th);
#endif
extern void __kmp_reap_worker(kmp_info_t *th);
extern void __kmp_terminate_thread(int gtid);

extern int __kmp_try_suspend_mx(kmp_info_t *th);
extern void __kmp_lock_suspend_mx(kmp_info_t *th);
extern void __kmp_unlock_suspend_mx(kmp_info_t *th);

extern void __kmp_elapsed(double *);
extern void __kmp_elapsed_tick(double *);

extern void __kmp_enable(int old_state);
extern void __kmp_disable(int *old_state);

extern void __kmp_thread_sleep(int millis);

extern void __kmp_common_initialize(void);
extern void __kmp_common_destroy(void);
extern void __kmp_common_destroy_gtid(int gtid);

#if KMP_OS_UNIX
extern void __kmp_register_atfork(void);
#endif
extern void __kmp_suspend_initialize(void);
extern void __kmp_suspend_initialize_thread(kmp_info_t *th);
extern void __kmp_suspend_uninitialize_thread(kmp_info_t *th);

extern kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
                                         int new_tid);
extern kmp_team_t *__kmp_allocate_team(kmp_root_t *root, int new_nproc,
                                       int max_nproc,
#if OMPT_SUPPORT
                                       ompt_data_t ompt_parallel_data,
#endif
                                       kmp_proc_bind_t proc_bind,
                                       kmp_internal_control_t *new_icvs,
                                       int argc, kmp_info_t *thr);
extern void __kmp_free_thread(kmp_info_t *);
extern void __kmp_free_team(kmp_root_t *, kmp_team_t *, kmp_info_t *);
extern kmp_team_t *__kmp_reap_team(kmp_team_t *);
/* ------------------------------------------------------------------------ */

extern void __kmp_initialize_bget(kmp_info_t *th);
extern void __kmp_finalize_bget(kmp_info_t *th);

KMP_EXPORT void *kmpc_malloc(size_t size);
KMP_EXPORT void *kmpc_aligned_malloc(size_t size, size_t alignment);
KMP_EXPORT void *kmpc_calloc(size_t nelem, size_t elsize);
KMP_EXPORT void *kmpc_realloc(void *ptr, size_t size);
KMP_EXPORT void kmpc_free(void *ptr);

/* declarations for internal use */

extern int __kmp_barrier(enum barrier_type bt, int gtid, int is_split,
                         size_t reduce_size, void *reduce_data,
                         void (*reduce)(void *, void *));
extern void __kmp_end_split_barrier(enum barrier_type bt, int gtid);
extern int __kmp_barrier_gomp_cancel(int gtid);
/* Tell __kmp_fork_call which compiler generated the fork call, and therefore
   how to deal with it. */
enum fork_context_e {
  fork_context_gnu, /* called from GNU generated code */
  fork_context_intel, /* called from Intel generated code */
  fork_context_last
};
extern int __kmp_fork_call(ident_t *loc, int gtid,
                           enum fork_context_e fork_context, kmp_int32 argc,
                           microtask_t microtask, launch_t invoker,
                           kmp_va_list ap);

extern void __kmp_join_call(ident_t *loc, int gtid
#if OMPT_SUPPORT
                            ,
                            enum fork_context_e fork_context
#endif
                            ,
                            int exit_teams = 0);
extern void __kmp_serialized_parallel(ident_t *id, kmp_int32 gtid);
extern void __kmp_internal_fork(ident_t *id, int gtid, kmp_team_t *team);
extern void __kmp_internal_join(ident_t *id, int gtid, kmp_team_t *team);
extern int __kmp_invoke_task_func(int gtid);
extern void __kmp_run_before_invoked_task(int gtid, int tid,
                                          kmp_info_t *this_thr,
                                          kmp_team_t *team);
extern void __kmp_run_after_invoked_task(int gtid, int tid,
                                         kmp_info_t *this_thr,
                                         kmp_team_t *team);

KMP_EXPORT int __kmpc_invoke_task_func(int gtid);
extern int __kmp_invoke_teams_master(int gtid);
extern void __kmp_teams_master(int gtid);
extern int __kmp_aux_get_team_num();
extern int __kmp_aux_get_num_teams();
extern void __kmp_save_internal_controls(kmp_info_t *thread);
extern void __kmp_user_set_library(enum library_type arg);
extern void __kmp_aux_set_library(enum library_type arg);
extern void __kmp_aux_set_stacksize(size_t arg);
extern void __kmp_aux_set_blocktime(int arg, kmp_info_t *thread, int tid);
extern void __kmp_aux_set_defaults(char const *str, size_t len);
void kmpc_set_blocktime(int arg);
void ompc_set_nested(int flag);
void ompc_set_dynamic(int flag);
void ompc_set_num_threads(int arg);

extern void __kmp_push_current_task_to_thread(kmp_info_t *this_thr,
                                              kmp_team_t *team, int tid);
extern void __kmp_pop_current_task_from_thread(kmp_info_t *this_thr);
extern kmp_task_t *__kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
                                    kmp_tasking_flags_t *flags,
                                    size_t sizeof_kmp_task_t,
                                    size_t sizeof_shareds,
                                    kmp_routine_entry_t task_entry);
extern void __kmp_init_implicit_task(ident_t *loc_ref, kmp_info_t *this_thr,
                                     kmp_team_t *team, int tid,
                                     int set_curr_task);
extern void __kmp_finish_implicit_task(kmp_info_t *this_thr);
extern void __kmp_free_implicit_task(kmp_info_t *this_thr);

extern kmp_event_t *__kmpc_task_allow_completion_event(ident_t *loc_ref,
                                                       int gtid,
                                                       kmp_task_t *task);
extern void __kmp_fulfill_event(kmp_event_t *event);

extern void __kmp_free_task_team(kmp_info_t *thread,
                                 kmp_task_team_t *task_team);
extern void __kmp_reap_task_teams(void);
extern void __kmp_push_task_team_node(kmp_info_t *thread, kmp_team_t *team);
extern void __kmp_pop_task_team_node(kmp_info_t *thread, kmp_team_t *team);
extern void __kmp_wait_to_unref_task_teams(void);
extern void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team);
extern void __kmp_task_team_sync(kmp_info_t *this_thr, kmp_team_t *team);
extern void __kmp_task_team_wait(kmp_info_t *this_thr, kmp_team_t *team
#if USE_ITT_BUILD
                                 ,
                                 void *itt_sync_obj
#endif /* USE_ITT_BUILD */
                                 ,
                                 int wait = 1);
extern void __kmp_tasking_barrier(kmp_team_t *team, kmp_info_t *thread,
                                  int gtid);

#if KMP_DEBUG
#define KMP_DEBUG_ASSERT_TASKTEAM_INVARIANT(team, thr)                         \
  KMP_DEBUG_ASSERT(                                                            \
      __kmp_tasking_mode != tskm_task_teams || team->t.t_nproc == 1 ||         \
      thr->th.th_task_team == team->t.t_task_team[thr->th.th_task_state])
#else
#define KMP_DEBUG_ASSERT_TASKTEAM_INVARIANT(team, thr) /* Nothing */
#endif
extern int __kmp_is_address_mapped(void *addr);
extern kmp_uint64 __kmp_hardware_timestamp(void);

#if KMP_OS_UNIX
extern int __kmp_read_from_file(char const *path, char const *format, ...);
#endif

/* ------------------------------------------------------------------------ */
// Assembly routines that have no compiler intrinsic replacement
//

extern int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int npr, int argc,
                                  void *argv[]
#if OMPT_SUPPORT
                                  ,
                                  void **exit_frame_ptr
#endif
);

/* ------------------------------------------------------------------------ */

KMP_EXPORT void __kmpc_threadprivate_register_vec(ident_t *, void *data,
                                                  kmpc_ctor_vec ctor,
                                                  kmpc_cctor_vec cctor,
                                                  kmpc_dtor_vec dtor,
                                                  size_t vector_length);

KMP_EXPORT void *__kmpc_threadprivate(ident_t *, kmp_int32 global_tid,
                                      void *data, size_t size);

KMP_EXPORT void __kmpc_critical(ident_t *, kmp_int32 global_tid,
                                kmp_critical_name *);
KMP_EXPORT void __kmpc_end_critical(ident_t *, kmp_int32 global_tid,
                                    kmp_critical_name *);
KMP_EXPORT void __kmpc_critical_with_hint(ident_t *, kmp_int32 global_tid,
                                          kmp_critical_name *, uint32_t hint);

KMP_EXPORT kmp_int32 __kmpc_sections_init(ident_t *loc, kmp_int32 global_tid);
KMP_EXPORT kmp_int32 __kmpc_next_section(ident_t *loc, kmp_int32 global_tid,
                                         kmp_int32 numberOfSections);

KMP_EXPORT void KMPC_FOR_STATIC_INIT(ident_t *loc, kmp_int32 global_tid,
                                     kmp_int32 schedtype, kmp_int32 *plastiter,
                                     kmp_int *plower, kmp_int *pupper,
                                     kmp_int *pstride, kmp_int incr,
                                     kmp_int chunk);

KMP_EXPORT void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
                                   size_t cpy_size, void *cpy_data,
                                   void (*cpy_func)(void *, void *),
                                   kmp_int32 didit);
/* the following are not exported */
extern void KMPC_SET_NUM_THREADS(int arg);
extern void KMPC_SET_DYNAMIC(int flag);
extern void KMPC_SET_NESTED(int flag);

/* OMP 3.0 tasking interface routines */
KMP_EXPORT kmp_int32 __kmpc_omp_task(ident_t *loc_ref, kmp_int32 gtid,
                                     kmp_task_t *new_task);
KMP_EXPORT kmp_task_t *__kmpc_omp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
                                             kmp_int32 flags,
                                             size_t sizeof_kmp_task_t,
                                             size_t sizeof_shareds,
                                             kmp_routine_entry_t task_entry);
KMP_EXPORT kmp_task_t *__kmpc_omp_target_task_alloc(
    ident_t *loc_ref, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t,
    size_t sizeof_shareds, kmp_routine_entry_t task_entry, kmp_int64 device_id);
KMP_EXPORT void __kmpc_omp_task_begin_if0(ident_t *loc_ref, kmp_int32 gtid,
                                          kmp_task_t *task);
KMP_EXPORT void __kmpc_omp_task_complete_if0(ident_t *loc_ref, kmp_int32 gtid,
                                             kmp_task_t *task);
KMP_EXPORT kmp_int32 __kmpc_omp_task_parts(ident_t *loc_ref, kmp_int32 gtid,
                                           kmp_task_t *new_task);
KMP_EXPORT kmp_int32 __kmpc_omp_taskwait(ident_t *loc_ref, kmp_int32 gtid);
KMP_EXPORT kmp_int32 __kmpc_omp_taskyield(ident_t *loc_ref, kmp_int32 gtid,
                                          int end_part);

void __kmpc_omp_task_begin(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task);
void __kmpc_omp_task_complete(ident_t *loc_ref, kmp_int32 gtid,
                              kmp_task_t *task);

/* ------------------------------------------------------------------------ */

KMP_EXPORT void __kmpc_taskgroup(ident_t *loc, int gtid);
KMP_EXPORT void __kmpc_end_taskgroup(ident_t *loc, int gtid);

KMP_EXPORT kmp_int32 __kmpc_omp_task_with_deps(
    ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task, kmp_int32 ndeps,
    kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
    kmp_depend_info_t *noalias_dep_list);

KMP_EXPORT kmp_base_depnode_t *__kmpc_task_get_depnode(kmp_task_t *task);

KMP_EXPORT kmp_depnode_list_t *__kmpc_task_get_successors(kmp_task_t *task);

KMP_EXPORT void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid,
                                     kmp_int32 ndeps,
                                     kmp_depend_info_t *dep_list,
                                     kmp_int32 ndeps_noalias,
                                     kmp_depend_info_t *noalias_dep_list);

KMP_EXPORT void __kmpc_omp_taskwait_deps_51(ident_t *loc_ref, kmp_int32 gtid,
                                            kmp_int32 ndeps,
                                            kmp_depend_info_t *dep_list,
                                            kmp_int32 ndeps_noalias,
                                            kmp_depend_info_t *noalias_dep_list,
                                            kmp_int32 has_no_wait);
extern kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task,
                                bool serialize_immediate);

KMP_EXPORT kmp_int32 __kmpc_cancel(ident_t *loc_ref, kmp_int32 gtid,
                                   kmp_int32 cncl_kind);
KMP_EXPORT kmp_int32 __kmpc_cancellationpoint(ident_t *loc_ref, kmp_int32 gtid,
                                              kmp_int32 cncl_kind);
KMP_EXPORT kmp_int32 __kmpc_cancel_barrier(ident_t *loc_ref, kmp_int32 gtid);
KMP_EXPORT int __kmp_get_cancellation_status(int cancel_kind);

KMP_EXPORT void __kmpc_taskloop(ident_t *loc, kmp_int32 gtid, kmp_task_t *task,
                                kmp_int32 if_val, kmp_uint64 *lb,
                                kmp_uint64 *ub, kmp_int64 st, kmp_int32 nogroup,
                                kmp_int32 sched, kmp_uint64 grainsize,
                                void *task_dup);
KMP_EXPORT void __kmpc_taskloop_5(ident_t *loc, kmp_int32 gtid,
                                  kmp_task_t *task, kmp_int32 if_val,
                                  kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
                                  kmp_int32 nogroup, kmp_int32 sched,
                                  kmp_uint64 grainsize, kmp_int32 modifier,
                                  void *task_dup);

KMP_EXPORT void *__kmpc_taskred_modifier_init(ident_t *loc, int gtid, int is_ws,
                                              int num, void *data);

KMP_EXPORT kmp_int32 __kmpc_omp_reg_task_with_affinity(
    ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task, kmp_int32 naffins,
    kmp_task_affinity_info_t *affin_list);

KMP_EXPORT void __kmp_set_num_teams(int num_teams);
KMP_EXPORT int __kmp_get_max_teams(void);
KMP_EXPORT void __kmp_set_teams_thread_limit(int limit);
KMP_EXPORT int __kmp_get_teams_thread_limit(void);
/* Lock interface routines (fast versions with gtid passed in) */
KMP_EXPORT void __kmpc_init_lock(ident_t *loc, kmp_int32 gtid,
                                 void **user_lock);
KMP_EXPORT void __kmpc_init_nest_lock(ident_t *loc, kmp_int32 gtid,
                                      void **user_lock);
KMP_EXPORT void __kmpc_destroy_lock(ident_t *loc, kmp_int32 gtid,
                                    void **user_lock);
KMP_EXPORT void __kmpc_destroy_nest_lock(ident_t *loc, kmp_int32 gtid,
                                         void **user_lock);
KMP_EXPORT void __kmpc_set_lock(ident_t *loc, kmp_int32 gtid, void **user_lock);
KMP_EXPORT void __kmpc_set_nest_lock(ident_t *loc, kmp_int32 gtid,
                                     void **user_lock);
KMP_EXPORT void __kmpc_unset_lock(ident_t *loc, kmp_int32 gtid,
                                  void **user_lock);
KMP_EXPORT void __kmpc_unset_nest_lock(ident_t *loc, kmp_int32 gtid,
                                       void **user_lock);
KMP_EXPORT int __kmpc_test_lock(ident_t *loc, kmp_int32 gtid, void **user_lock);
KMP_EXPORT int __kmpc_test_nest_lock(ident_t *loc, kmp_int32 gtid,
                                     void **user_lock);

KMP_EXPORT void __kmpc_init_lock_with_hint(ident_t *loc, kmp_int32 gtid,
                                           void **user_lock, uintptr_t hint);
KMP_EXPORT void __kmpc_init_nest_lock_with_hint(ident_t *loc, kmp_int32 gtid,
                                                void **user_lock,
                                                uintptr_t hint);
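/* Illustrative usage sketch (hand-written, not compiler output): user_lock is
   the address of the lock variable the program declared; 'loc' and 'gtid' are
   assumed to come from the caller:

     void *lck = nullptr; // stands in for an omp_lock_t-sized object
     __kmpc_init_lock(loc, gtid, &lck);
     __kmpc_set_lock(loc, gtid, &lck);
     // ... critical work ...
     __kmpc_unset_lock(loc, gtid, &lck);
     __kmpc_destroy_lock(loc, gtid, &lck);

   The _with_hint variants additionally take an omp_sync_hint-style value
   (e.g. uncontended vs. speculative) that the runtime may use to choose a
   lock implementation. */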
static inline bool __kmp_tdg_is_recording(kmp_tdg_status_t status) {
  return status == KMP_TDG_RECORDING;
}

KMP_EXPORT kmp_int32 __kmpc_start_record_task(ident_t *loc, kmp_int32 gtid,
                                              kmp_int32 input_flags,
                                              kmp_int32 tdg_id);
KMP_EXPORT void __kmpc_end_record_task(ident_t *loc, kmp_int32 gtid,
                                       kmp_int32 input_flags, kmp_int32 tdg_id);
KMP_EXPORT kmp_int32 __kmpc_reduce_nowait(
    ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
    void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck);
KMP_EXPORT void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
                                         kmp_critical_name *lck);
KMP_EXPORT kmp_int32 __kmpc_reduce(
    ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
    void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck);
KMP_EXPORT void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
                                  kmp_critical_name *lck);

/* Internal fast reduction routines */

extern PACKED_REDUCTION_METHOD_T __kmp_determine_reduction_method(
    ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
    void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck);

// this function is for testing the set/get/determine reduce method
KMP_EXPORT kmp_int32 __kmp_get_reduce_method(void);

KMP_EXPORT kmp_uint64 __kmpc_get_taskid();
KMP_EXPORT kmp_uint64 __kmpc_get_parent_taskid();
KMP_EXPORT void __kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
                                        kmp_int32 num_threads);
KMP_EXPORT void __kmpc_push_num_threads_strict(ident_t *loc,
                                               kmp_int32 global_tid,
                                               kmp_int32 num_threads,
                                               int severity,
                                               const char *message);
KMP_EXPORT void __kmpc_push_num_threads_list(ident_t *loc, kmp_int32 global_tid,
                                             kmp_uint32 list_length,
                                             kmp_int32 *num_threads_list);
KMP_EXPORT void __kmpc_push_num_threads_list_strict(
    ident_t *loc, kmp_int32 global_tid, kmp_uint32 list_length,
    kmp_int32 *num_threads_list, int severity, const char *message);

KMP_EXPORT void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
                                      int proc_bind);
KMP_EXPORT void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
                                      kmp_int32 num_teams,
                                      kmp_int32 num_threads);
KMP_EXPORT void __kmpc_set_thread_limit(ident_t *loc, kmp_int32 global_tid,
                                        kmp_int32 thread_limit);
KMP_EXPORT void __kmpc_push_num_teams_51(ident_t *loc, kmp_int32 global_tid,
                                         kmp_int32 num_teams_lb,
                                         kmp_int32 num_teams_ub,
                                         kmp_int32 num_threads);
KMP_EXPORT void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid,
                                     kmp_int32 num_dims,
                                     const struct kmp_dim *dims);
KMP_EXPORT void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid,
                                     const kmp_int64 *vec);
KMP_EXPORT void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid,
                                     const kmp_int64 *vec);
KMP_EXPORT void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);

KMP_EXPORT void *__kmpc_threadprivate_cached(ident_t *loc, kmp_int32 global_tid,
                                             void *data, size_t size,
                                             void ***cache);

/* The routines below are not exported.
   Consider making them exported. */
void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size);
struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size);

void __kmp_threadprivate_resize_cache(int newCapacity);
void __kmp_cleanup_threadprivate_caches();
// ompc_, kmpc_ entries moved from omp.h.
#if KMP_OS_WINDOWS
#define KMPC_CONVENTION __cdecl
#else
#define KMPC_CONVENTION
#endif

#ifndef __OMP_H
typedef enum omp_sched_t {
  omp_sched_static = 1,
  omp_sched_dynamic = 2,
  omp_sched_guided = 3,
  omp_sched_auto = 4
} omp_sched_t;
typedef void *kmp_affinity_mask_t;
#endif
KMP_EXPORT void KMPC_CONVENTION ompc_set_max_active_levels(int);
KMP_EXPORT void KMPC_CONVENTION ompc_set_schedule(omp_sched_t, int);
KMP_EXPORT int KMPC_CONVENTION ompc_get_ancestor_thread_num(int);
KMP_EXPORT int KMPC_CONVENTION ompc_get_team_size(int);
KMP_EXPORT int KMPC_CONVENTION
kmpc_set_affinity_mask_proc(int, kmp_affinity_mask_t *);
KMP_EXPORT int KMPC_CONVENTION
kmpc_unset_affinity_mask_proc(int, kmp_affinity_mask_t *);
KMP_EXPORT int KMPC_CONVENTION
kmpc_get_affinity_mask_proc(int, kmp_affinity_mask_t *);

KMP_EXPORT void KMPC_CONVENTION kmpc_set_stacksize(int);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_stacksize_s(size_t);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_library(int);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_defaults(char const *);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_disp_num_buffers(int);
void KMP_EXPAND_NAME(ompc_set_affinity_format)(char const *format);
size_t KMP_EXPAND_NAME(ompc_get_affinity_format)(char *buffer, size_t size);
void KMP_EXPAND_NAME(ompc_display_affinity)(char const *format);
size_t KMP_EXPAND_NAME(ompc_capture_affinity)(char *buffer, size_t buf_size,
                                              char const *format);
enum kmp_target_offload_kind {
  tgt_disabled = 0,
  tgt_default = 1,
  tgt_mandatory = 2
};
typedef enum kmp_target_offload_kind kmp_target_offload_kind_t;
// Set via OMP_TARGET_OFFLOAD if specified, defaults to tgt_default otherwise
extern kmp_target_offload_kind_t __kmp_target_offload;
extern int __kmpc_get_target_offload();

// Constants used in libomptarget
#define KMP_DEVICE_DEFAULT -1 // This is libomptarget's default device.
#define KMP_DEVICE_ALL -11 // This is libomptarget's "all devices".

// OMP Pause Resource
// The following enum is used both to set the status in __kmp_pause_status,
// and to request the pause level as an argument to __kmp_pause_resource().
typedef enum kmp_pause_status_t {
  kmp_not_paused = 0, // status is not paused, or requesting resume
  kmp_soft_paused = 1, // status is soft-paused, or requesting soft pause
  kmp_hard_paused = 2, // status is hard-paused, or requesting hard pause
  kmp_stop_tool_paused = 3 // requesting stop_tool pause
} kmp_pause_status_t;

// This stores the pause state of the runtime.
extern kmp_pause_status_t __kmp_pause_status;
extern int __kmpc_pause_resource(kmp_pause_status_t level);
extern int __kmp_pause_resource(kmp_pause_status_t level);
// Soft resume sets __kmp_pause_status, and wakes up all threads.
extern void __kmp_resume_if_soft_paused();
// Hard resume simply resets the status to not paused. The library will
// continue running, but will not power up the threads.
static inline void __kmp_resume_if_hard_paused() {
  if (__kmp_pause_status == kmp_hard_paused) {
    __kmp_pause_status = kmp_not_paused;
  }
}
extern void __kmp_omp_display_env(int verbose);
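/* Illustrative sketch (assumption: __kmpc_pause_resource returns 0 on
   success, mirroring omp_pause_resource semantics): a tool could request a
   soft pause and later resume the runtime like this:

     if (__kmpc_pause_resource(kmp_soft_paused) == 0) {
       // worker threads are idled; __kmp_pause_status is kmp_soft_paused
     }
     __kmpc_pause_resource(kmp_not_paused); // request resume
*/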
extern volatile int __kmp_init_hidden_helper;
extern volatile int __kmp_hidden_helper_team_done;
extern kmp_int32 __kmp_enable_hidden_helper;
extern kmp_info_t *__kmp_hidden_helper_main_thread;
extern kmp_info_t **__kmp_hidden_helper_threads;
extern kmp_int32 __kmp_hidden_helper_threads_num;
extern std::atomic<kmp_int32> __kmp_unexecuted_hidden_helper_tasks;

extern void __kmp_hidden_helper_initialize();
extern void __kmp_hidden_helper_threads_initz_routine();
extern void __kmp_do_initialize_hidden_helper_threads();
extern void __kmp_hidden_helper_threads_initz_wait();
extern void __kmp_hidden_helper_initz_release();
extern void __kmp_hidden_helper_threads_deinitz_wait();
extern void __kmp_hidden_helper_threads_deinitz_release();
extern void __kmp_hidden_helper_main_thread_wait();
extern void __kmp_hidden_helper_worker_thread_wait();
extern void __kmp_hidden_helper_worker_thread_signal();
extern void __kmp_hidden_helper_main_thread_release();
// Check whether a given thread is a hidden helper thread.
#define KMP_HIDDEN_HELPER_THREAD(gtid)                                         \
  ((gtid) >= 1 && (gtid) <= __kmp_hidden_helper_threads_num)

#define KMP_HIDDEN_HELPER_WORKER_THREAD(gtid)                                  \
  ((gtid) > 1 && (gtid) <= __kmp_hidden_helper_threads_num)

#define KMP_HIDDEN_HELPER_MAIN_THREAD(gtid)                                    \
  ((gtid) == 1 && (gtid) <= __kmp_hidden_helper_threads_num)

#define KMP_HIDDEN_HELPER_TEAM(team)                                           \
  (team->t.t_threads[0] == __kmp_hidden_helper_main_thread)

// Map a gtid to a hidden helper worker's gtid; the main thread of the hidden
// helper team (gtid 1) is excluded from the mapping.
#define KMP_GTID_TO_SHADOW_GTID(gtid)                                          \
  ((gtid) % (__kmp_hidden_helper_threads_num - 1) + 2)

// The hidden helper threads occupy gtids 1..__kmp_hidden_helper_threads_num,
// so regular threads' gtids are shifted; this maps back to the logical id.
static inline int __kmp_adjust_gtid_for_hidden_helpers(int gtid) {
  int adjusted_gtid = gtid;
  if (__kmp_hidden_helper_threads_num > 0 && gtid > 0 &&
      gtid - __kmp_hidden_helper_threads_num >= 0) {
    adjusted_gtid -= __kmp_hidden_helper_threads_num;
  }
  return adjusted_gtid;
}
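/* Worked example (illustrative): with __kmp_hidden_helper_threads_num == 8,
   gtid 0 is the initial thread, gtids 1..8 are the hidden helper team (gtid 1
   is its main thread), and the first regular worker gets gtid 9:

     __kmp_adjust_gtid_for_hidden_helpers(9)  -> 1
     KMP_GTID_TO_SHADOW_GTID(9)               -> 9 % 7 + 2 = 4
     KMP_HIDDEN_HELPER_WORKER_THREAD(4)       -> true (2 <= 4 <= 8)

   Shadow gtids always land in 2..8, skipping the helper main thread. */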
#if ENABLE_LIBOMPTARGET
// Pointer to the libomptarget synchronization callback
extern void (*kmp_target_sync_cb)(ident_t *loc_ref, int gtid,
                                  void *current_task, void *event);
#endif

// OpenMP error severity
typedef enum kmp_severity_t {
  severity_warning = 1,
  severity_fatal = 2
} kmp_severity_t;
extern void __kmpc_error(ident_t *loc, int severity, const char *message);
// Support for scope directive
KMP_EXPORT void __kmpc_scope(ident_t *loc, kmp_int32 gtid, void *reserved);
KMP_EXPORT void __kmpc_end_scope(ident_t *loc, kmp_int32 gtid, void *reserved);
/* ------------------------------------------------------------------------ */

template <bool C, bool S>
extern void __kmp_suspend_32(int th_gtid, kmp_flag_32<C, S> *flag);
template <bool C, bool S>
extern void __kmp_suspend_64(int th_gtid, kmp_flag_64<C, S> *flag);
template <bool C, bool S>
extern void __kmp_atomic_suspend_64(int th_gtid,
                                    kmp_atomic_flag_64<C, S> *flag);
extern void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag);
#if KMP_HAVE_MWAIT || KMP_HAVE_UMWAIT
template <bool C, bool S>
extern void __kmp_mwait_32(int th_gtid, kmp_flag_32<C, S> *flag);
template <bool C, bool S>
extern void __kmp_mwait_64(int th_gtid, kmp_flag_64<C, S> *flag);
template <bool C, bool S>
extern void __kmp_atomic_mwait_64(int th_gtid, kmp_atomic_flag_64<C, S> *flag);
extern void __kmp_mwait_oncore(int th_gtid, kmp_flag_oncore *flag);
#endif
template <bool C, bool S>
extern void __kmp_resume_32(int target_gtid, kmp_flag_32<C, S> *flag);
template <bool C, bool S>
extern void __kmp_resume_64(int target_gtid, kmp_flag_64<C, S> *flag);
template <bool C, bool S>
extern void __kmp_atomic_resume_64(int target_gtid,
                                   kmp_atomic_flag_64<C, S> *flag);
extern void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag);

template <bool C, bool S>
int __kmp_execute_tasks_32(kmp_info_t *thread, kmp_int32 gtid,
                           kmp_flag_32<C, S> *flag, int final_spin,
                           int *thread_finished,
#if USE_ITT_BUILD
                           void *itt_sync_obj,
#endif /* USE_ITT_BUILD */
                           kmp_int32 is_constrained);
template <bool C, bool S>
int __kmp_execute_tasks_64(kmp_info_t *thread, kmp_int32 gtid,
                           kmp_flag_64<C, S> *flag, int final_spin,
                           int *thread_finished,
#if USE_ITT_BUILD
                           void *itt_sync_obj,
#endif /* USE_ITT_BUILD */
                           kmp_int32 is_constrained);
template <bool C, bool S>
int __kmp_atomic_execute_tasks_64(kmp_info_t *thread, kmp_int32 gtid,
                                  kmp_atomic_flag_64<C, S> *flag,
                                  int final_spin, int *thread_finished,
#if USE_ITT_BUILD
                                  void *itt_sync_obj,
#endif /* USE_ITT_BUILD */
                                  kmp_int32 is_constrained);
int __kmp_execute_tasks_oncore(kmp_info_t *thread, kmp_int32 gtid,
                               kmp_flag_oncore *flag, int final_spin,
                               int *thread_finished,
#if USE_ITT_BUILD
                               void *itt_sync_obj,
#endif /* USE_ITT_BUILD */
                               kmp_int32 is_constrained);
extern int __kmp_nesting_mode;
extern int __kmp_nesting_mode_nlevels;
extern int *__kmp_nesting_nth_level;
extern void __kmp_init_nesting_mode();
extern void __kmp_set_nesting_mode_threads();
// This class safely opens and closes a C-style FILE* object using RAII
// semantics. There are also methods which allow using stdout or stderr as
// the destination file.
class kmp_safe_raii_file_t {
  FILE *f;

  void close() {
    if (f && f != stdout && f != stderr) {
      fclose(f);
    }
    f = nullptr;
  }

public:
  kmp_safe_raii_file_t() : f(nullptr) {}
  kmp_safe_raii_file_t(const char *filename, const char *mode,
                       const char *env_var = nullptr)
      : f(nullptr) {
    open(filename, mode, env_var);
  }
  kmp_safe_raii_file_t(const kmp_safe_raii_file_t &other) = delete;
  kmp_safe_raii_file_t &operator=(const kmp_safe_raii_file_t &other) = delete;
  ~kmp_safe_raii_file_t() { close(); }

  /* Open filename using mode. The file is automatically closed in the
     destructor. The env_var parameter indicates the environment variable the
     filename came from, if non-NULL. */
  void open(const char *filename, const char *mode,
            const char *env_var = nullptr) {
    KMP_ASSERT(!f);
    f = fopen(filename, mode);
    if (!f) {
      int code = errno;
      if (env_var) {
        __kmp_fatal(KMP_MSG(CantOpenFileForReading, filename), KMP_ERR(code),
                    KMP_HNT(CheckEnvVar, env_var, filename), __kmp_msg_null);
      } else {
        __kmp_fatal(KMP_MSG(CantOpenFileForReading, filename), KMP_ERR(code),
                    __kmp_msg_null);
      }
    }
  }
  /* Instead of erroring out, return non-zero when fopen() is unsuccessful
     for any reason. */
  int try_open(const char *filename, const char *mode) {
    KMP_ASSERT(!f);
    f = fopen(filename, mode);
    if (!f)
      return errno;
    return 0;
  }
  void set_stdout() { f = stdout; }
  void set_stderr() { f = stderr; }
  operator bool() { return bool(f); }
  operator FILE *() { return f; }
};
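/* Illustrative usage sketch (hypothetical file name and env var):

     kmp_safe_raii_file_t file("omp_stats.txt", "w", "KMP_STATS_FILE");
     if (file)                     // operator bool(): did the open succeed?
       fprintf(file, "header\n");  // operator FILE*(): usable wherever a
                                   // FILE* is expected
     // fclose() runs automatically when 'file' goes out of scope; streams
     // installed via set_stdout()/set_stderr() are never fclose()d.
*/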
/* Set up standard conversions from one integral type to another, asserting
   (via KMP_ASSERT) that the value is representable in the target type. */
template <typename SourceType, typename TargetType,
          bool isSourceSmaller = (sizeof(SourceType) < sizeof(TargetType)),
          bool isSourceEqual = (sizeof(SourceType) == sizeof(TargetType)),
          bool isSourceSigned = std::is_signed<SourceType>::value,
          bool isTargetSigned = std::is_signed<TargetType>::value>
struct kmp_convert {};

// Both types are signed; source smaller -- always fits.
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, true, false, true, true> {
  static TargetType to(SourceType src) { return (TargetType)src; }
};
// Source equal in size.
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, true, true, true> {
  static TargetType to(SourceType src) { return src; }
};
// Source bigger -- the value must fit both bounds of the target.
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, false, true, true> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    KMP_ASSERT(src >= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::min)()));
    return (TargetType)src;
  }
};

// Source signed, target unsigned:
// source smaller -- the value must be non-negative.
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, true, false, true, false> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src >= 0);
    return (TargetType)src;
  }
};
// Source equal in size -- the value must be non-negative.
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, true, true, false> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src >= 0);
    return (TargetType)src;
  }
};
// Source bigger -- non-negative and within the target's maximum.
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, false, true, false> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src >= 0);
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
  }
};

// Source unsigned, target signed:
// source smaller -- always fits.
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, true, false, false, true> {
  static TargetType to(SourceType src) { return (TargetType)src; }
};
// Source equal in size -- must not exceed the target's maximum.
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, true, false, true> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
  }
};
// Source bigger -- must not exceed the target's maximum.
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, false, false, true> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
  }
};

// Both types are unsigned:
// source smaller -- always fits.
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, true, false, false, false> {
  static TargetType to(SourceType src) { return (TargetType)src; }
};
// Source equal in size.
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, true, false, false> {
  static TargetType to(SourceType src) { return src; }
};
// Source bigger -- must not exceed the target's maximum.
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, false, false, false> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
  }
};

template <typename T1, typename T2>
static inline void __kmp_type_convert(T1 src, T2 *dest) {
  *dest = kmp_convert<T1, T2>::to(src);
}
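/* Illustrative usage sketch (hypothetical values, not part of the runtime):

     kmp_int64 trip = 100000; // e.g. a 64-bit trip count
     kmp_int32 trip32;
     __kmp_type_convert(trip, &trip32); // picks the signed/signed,
                                        // source-bigger specialization and
                                        // KMP_ASSERTs that the value fits
     kmp_uint32 utrip;
     __kmp_type_convert(trip, &utrip);  // signed->unsigned: asserts trip >= 0
                                        // and that it fits the target
*/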