// ==++== // // Copyright (c) Microsoft Corporation. All rights reserved. // // ==--== // =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+ // // SchedulerBase.h // // Header file containing the metaphor for a concrt scheduler // // =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #pragma once // // Defines how many (1 << x) pointers worth of padding there will be in between quick cache slots. // #ifdef _WIN64 // // 64 bit: // 1 << 4 == 8 pointers * 8 == 64 bytes (assumed cache pad) // #define QUICKCACHEPAD_SHIFT 4 #else // !_WIN64 // // 32 bit: // 1 << 5 == 16 pointers * 4 == 64 bytes (assumed cache pad) // #define QUICKCACHEPAD_SHIFT 5 #endif // _WIN64 namespace Concurrency { namespace details { // The base class that implements a scheduler instance class SchedulerBase : public Scheduler { private: // // NESTED CLASSES: // /// /// Represents information about the NUMA nodes on the machine. /// struct NumaInformation { QuickBitSet m_nodeMask; QuickBitSet m_resourceMask; }; /// /// An intrusive node type for context tracking outside of the normal placement of contexts upon /// free/runnable lists. /// class ContextNode { public: ContextNode(InternalContextBase *pContext) : m_pContext(pContext) { } SLIST_ENTRY m_slNext{}; InternalContextBase *m_pContext; }; /// /// A node that tracks events needing to be signaled at finalization time. /// class WaitNode { public: WaitNode *m_pNext, *m_pPrev; HANDLE m_hEvent; }; /// /// A class that the scheduler uses to manage external context exit events for implicitly attached /// external contexts. /// class ContextExitEventHandler { public: bool m_fCanceled; // Count of handles the event handler is waiting on at any time. unsigned short m_handleCount; // Modified to reflect the new handle count after adding handles to the wait array and before notifying the // watch dog of handle addition. 
unsigned short m_newHandleCount; // Event handle used to notify the event handler of certain events (new handle addition, shutdown). HANDLE m_hWakeEventHandler; SchedulerBase *m_pScheduler; // prev, next pointers for the list of all handlers in the scheduler. ContextExitEventHandler *m_pNext, *m_pPrev; // list entry for a list of handlers with available slots for context handles. The scheduler uses this // list when registering contexts. ListEntry m_availableChain; // The array of wait handles each thread waits on. Of these one is an event handle for notification // and the rest are handles to OS contexts. HANDLE m_waitHandleArray[MAXIMUM_WAIT_OBJECTS]; }; public: /// /// Creates a scheduler that only manages internal contexts. Implicitly calls Reference. /// If Attach is called, the scheduler is no longer anonymous because it is also managing the external /// context where Attach was called. To destroy an anonymous scheduler, Release needs to be called. /// /// /// [in] A const reference to the scheduler policy. /// /// /// A pointer to the new scheduler (never null) /// static _Ret_writes_(1) SchedulerBase* Create(_In_ const SchedulerPolicy& policy); static _Ret_writes_(1) SchedulerBase* CreateWithoutInitializing(_In_ const SchedulerPolicy& policy); // Constructor SchedulerBase(_In_ const ::Concurrency::SchedulerPolicy& policy); // dtor virtual ~SchedulerBase(); public: // Public Scheduler interface /// /// Returns a unique identifier for this scheduler. No error state. /// virtual unsigned int Id() const { return m_id; } /// /// Returns a current number of virtual processors for this scheduler. No error state. /// virtual unsigned int GetNumberOfVirtualProcessors() const { return m_virtualProcessorCount; }; /// /// Returns a copy of the policy this scheduler is using. No error state. /// virtual SchedulerPolicy GetPolicy() const; /// /// Increments a reference count to this scheduler to manage lifetimes over composition. 
/// This reference count is known as the scheduler reference count. /// /// /// The resulting reference count is returned. No error state. /// virtual unsigned int Reference(); /// /// Decrements this scheduler's reference count to manage lifetimes over composition. /// A scheduler starts the shutdown protocol when the scheduler reference count goes to zero. /// /// /// The resulting reference count is returned. No error state. /// virtual unsigned int Release(); /// /// Causes the OS event object 'event' to be set when the scheduler shuts down and destroys itself. /// /// /// [in] A handle to avalid event object /// virtual void RegisterShutdownEvent(_In_ HANDLE event); /// /// Attaches this scheduler to the calling thread. Implicitly calls Reference. /// After this function is called, the calling thread is then managed by the scheduler and the scheduler becomes the current scheduler. /// It is illegal for an internal context to call Attach on its current scheduler. /// virtual void Attach(); /// /// Allows a user defined policy to be used to create the default scheduler. It is only valid to call this API when no default /// scheduler exists. Once a default policy is set, it remains in effect until the next time the API is called (in the absence /// of a default scheduler). /// /// /// [in] A pointer to the policy to be set as the default. The runtime will make a copy of the policy /// for its use, and the user is responsible for the lifetime of the policy that is passed in. /// static void SetDefaultSchedulerPolicy(_In_ const SchedulerPolicy & _Policy); /// /// Resets the default scheduler policy, and the next time a default scheduler is created, it will use the runtime's default policy settings. /// static void ResetDefaultSchedulerPolicy(); /// /// Creates a new schedule group within the scheduler associated with the calling context. /// /// /// A pointer to the newly created schedule group. This ScheduleGroup object has an initial reference count placed on it. 
/// /// /// This method will result in the process' default scheduler being created and/or attached to the calling context if there is no /// scheduler currently associated with the calling context. /// You must invoke the Release method on a schedule group when you are /// done scheduling work to it. The scheduler will destroy the schedule group when all work queued to it has completed. /// Note that if you explicitly created this scheduler, you must release all references to schedule groups within it, before /// you release your reference on the scheduler, via detaching the current context from it. /// /// /// /// virtual ScheduleGroup* CreateScheduleGroup() { location unbiased; return InternalCreateScheduleGroup(&unbiased); } /// /// Creates a new schedule group within the scheduler associated with the calling context. Tasks scheduled within the newly created /// schedule group will be biased towards executing at the specified location. /// /// /// A reference to a location where the tasks within the schedule group will biased towards executing at. /// /// /// A pointer to the newly created schedule group. This ScheduleGroup object has an initial reference count placed on it. /// /// /// This method will result in the process' default scheduler being created and/or attached to the calling context if there is no /// scheduler currently associated with the calling context. /// You must invoke the Release method on a schedule group when you are /// done scheduling work to it. The scheduler will destroy the schedule group when all work queued to it has completed. /// Note that if you explicitly created this scheduler, you must release all references to schedule groups within it, before /// you release your reference on the scheduler, via detaching the current context from it. 
/// /// /// /// /// virtual ScheduleGroup * CreateScheduleGroup(location& _Placement) { return InternalCreateScheduleGroup(&_Placement); } /// /// Schedules a light-weight task within the scheduler. The light-weight task will be placed in a schedule group of the runtime's choosing. /// /// /// A pointer to the function to execute to perform the body of the light-weight task. /// /// /// A void pointer to the data that will be passed as a parameter to the body of the task. /// /// /// virtual void ScheduleTask(TaskProc proc, void *data); /// /// Schedules a light-weight task within the scheduler. The light-weight task will be placed /// within a schedule group of the runtime's choosing. It will also be biased towards executing at the specified location. /// /// /// A pointer to the function to execute to perform the body of the light-weight task. /// /// /// A void pointer to the data that will be passed as a parameter to the body of the task. /// /// /// A reference to a location where the light-weight task will be biased towards executing at. /// /// /// /// virtual void ScheduleTask(TaskProc proc, void * data, location& placement); /// /// Determines whether a given location is available on the scheduler. /// /// /// A reference to the location to query the scheduler about. /// /// /// An indication of whether or not the location specified by the argument is available on the scheduler. /// /// /// Note that the return value is an instantaneous sampling of whether the given location is available. In the presence of multiple /// schedulers, dynamic resource management may add or take away resources from schedulers at any point. Should this happen, the given /// location may change availability. /// virtual bool IsAvailableLocation(const location& _Placement) const; public: // Internal stuff enum { // // One shot starts with a single reference count placed implicitly by the module in which ConcRT is contained. 
// ONESHOT_NOT_INITIALIZED = 1, ONESHOT_INITIALIZED_FLAG = 0x80000000 }; /// /// Returns whether or not the scheduler has performed one shot static construction. /// static bool IsOneShotInitialized() { return ((s_oneShotInitializationState & ONESHOT_INITIALIZED_FLAG) != 0); } /// /// Detaches this scheduler from the current thread. It is required that the current scheduler on the thread be the same as 'this' /// void Detach(); /// /// Generates a unique identifier for a context. /// unsigned int GetNewContextId(); /// /// Generates a unique identifier for a schedule group. /// unsigned int GetNewScheduleGroupId(); /// /// Generates a unique identifier for a work queue across schedulers. /// static unsigned int GetNewWorkQueueId(); /// /// Gets a reserved context off the free list. This is lock-free and safe to use at any point in the scheduler. If a context /// is returned, it is a pre-bound and unstarted context. /// InternalContextBase *GetReservedContext() { return m_reservedContexts.Pop(); } /// /// Releases the list of reserved contexts to the idle pool. The thread proxy /// is released before returning the contexts to the idle pool. /// void ReleaseReservedContexts(); /// /// Acquires a new internal context of the appropriate type and returns it. This can come from either /// a free list within the scheduler, or be one newly allocated from the heap. /// /// /// An indication as to whether the creation should be throttled. /// InternalContextBase *GetInternalContext(bool fThrottled = true); /// /// Acquires a new internal context of the appropriate type and notifies the scheduler when it is available. The scheduler can /// choose what to do with said internal context. This creation happens in a deferred manner subject to throttling constraints. /// void DeferredGetInternalContext(); /// /// Releases an internal context to the scheduler's idle pool. 
/// void ReleaseInternalContext(InternalContextBase *pContext, bool fUnbind = false); /// /// Gets a realized chore from the idle pool, creating a new one if the idle pool is empty. /// RealizedChore *GetRealizedChore(TaskProc pFunction, void* pParameters); /// /// Releases an external context of the to the scheduler's idle pool, destroying it if the idle pool is full. /// void ReleaseRealizedChore(RealizedChore *pChore); /// /// References the anonymous schedule group, creating it if it doesn't exists, and returns a pointer to it. /// ScheduleGroupBase* GetAnonymousScheduleGroup() { return m_pAnonymousScheduleGroup; } /// /// References a segment in the anonymous schedule group and returns a pointer to it. /// /// /// A segment in the anonymous schedule group. /// ScheduleGroupSegmentBase *GetAnonymousScheduleGroupSegment(); static SchedulerBase* CurrentScheduler(); static SchedulerBase* FastCurrentScheduler(); static SchedulerBase* SafeFastCurrentScheduler(); static ContextBase* FastCurrentContext(); static ContextBase* SafeFastCurrentContext(); static ContextBase* CreateContextFromDefaultScheduler(); static ContextBase* CurrentContext() { if ( !IsOneShotInitialized()) return CreateContextFromDefaultScheduler(); ContextBase *pContext = (ContextBase*) platform::__TlsGetValue(t_dwContextIndex); if (pContext == NULL) return CreateContextFromDefaultScheduler(); return pContext; } /// /// Gets an IScheduler pointer for use in communication with the resource manager. /// virtual IScheduler* GetIScheduler() = 0; /// /// Gets an IResourceManager pointer for use in communication with the resource manager. /// IResourceManager *GetResourceManager() const { return m_pResourceManager; } /// /// Gets an ISchedulerProxy pointer for use in communication with the resource manager. /// ISchedulerProxy *GetSchedulerProxy() const { return m_pSchedulerProxy; } /// /// Find an available virtual processor in the scheduler. 
/// bool FoundAvailableVirtualProcessor(VirtualProcessor::ClaimTicket& ticket, location bias = location(), ULONG type = VirtualProcessor::AvailabilityAny); /// /// Try to steal from foreign nodes. /// InternalContextBase *StealForeignLocalRunnableContext(SchedulingNode *pSkipNode); /// /// Start up a virtual processor in the scheduler, if one is found. The virtual processor must have the specified availability /// characteristics. /// bool StartupVirtualProcessor(ScheduleGroupSegmentBase *pSegment, location bias = location(), ULONG type = VirtualProcessor::AvailabilityAny); /// /// Start up an idle virtual processor in the scheduler. This can be any virtual processor except one that is inactive due to /// waiting for a thread creation. /// bool StartupIdleVirtualProcessor(ScheduleGroupSegmentBase *pSegment, location bias = location()) { // // If the vproc is inactive pending thread -- there's no point in performing a general wake up. The general wake up will require an SFW // context which will simply put it back to sleep and violate our concurrency constraints. Either an incoming runnable must push to the // context or the throttler must wake it up. // return StartupVirtualProcessor(pSegment, bias, VirtualProcessor::AvailabilityAny & ~VirtualProcessor::AvailabilityInactivePendingThread); } /// /// Start up an new virtual processor in the scheduler. New virtual processor refers /// to any vproc that either has never been activated or has been deactivated due to lack /// of work (wait for work). /// virtual void StartupNewVirtualProcessor(ScheduleGroupSegmentBase *pSegment, location bias = location()) { StartupVirtualProcessor(pSegment, bias, (VirtualProcessor::AvailabilityType)(VirtualProcessor::AvailabilityIdle | VirtualProcessor::AvailabilityInactive)); } /// /// Attempts to push a runnable to an inactive virtual processor. If successful, true is returned. 
/// virtual bool PushRunnableToInactive(InternalContextBase *pRunnableContext, location bias = location()); /// /// Called when a virtual processor becomes active (before it does) or becomes inactive (before it does). /// /// /// True if a virtual processor is going from INACTIVE to ACTIVE, and false if it is going from ACTIVE to INACTIVE. /// /// /// For activation, the function returns true if the virtual processor was successfully activated, and false /// if it could not be activated because the scheduler was shutting down. For inactivation, it always returns true. /// bool VirtualProcessorActive(bool fActive); /// /// Internal contexts and background threads call this when created and used inside the scheduler. /// void IncrementInternalContextCount(); /// /// Internal contexts and background threads call this function in order to notify that they are about to exit. /// The last caller will trigger scheduler finalization. /// void DecrementInternalContextCount(); /// /// Returns the scheduling protocol policy element value this scheduler was created with. /// ::Concurrency::SchedulingProtocolType GetSchedulingProtocol() { return m_schedulingProtocol; } /// /// Returns a pointer to the 'next' scheduling ring in a round-robin manner /// SchedulingRing *GetNextSchedulingRing(); // Specifying pOwningNode produces an order of scheduling rings, ordered by node distance. // pCurrentNode is the current position in said order. SchedulingRing *GetNextSchedulingRing(const SchedulingRing *pOwningRing, SchedulingRing *pCurrentRing); /// /// Sets the 'next' scheduling ring in a round-robin manner /// void SetNextSchedulingRing(SchedulingRing *pRing); /// /// Returns true if the scheduler has gone past a certain point in PhaseTwoShutdown (when it sets the shutdown completed flag). /// This function is mainly used for debug asserts. /// bool HasCompletedShutdown(); /// /// Returns true if the scheduler is in the finalization sweep, i.e, the SUSPEND_GATE_FLAG is set. 
/// This function is mainly used for debug asserts. /// bool InFinalizationSweep(); /// /// Internal contexts call the scheduler when they go idle for a specified amount of time in order to allow /// things that happen on scheduler idle to happen (e.g.: sweeping for phase two shutdown). /// They must also call the scheduler when they transition out of idle before executing a work item or performing /// a context switch. This may halt scheduler shutdown or it may coordinate with scheduler shutdown depending on /// the current phase of shutdown. /// /// This call *MUST* be made from a scheduler critical region. /// /// /// Specifies whether the processor is going idle or non-idle. /// void VirtualProcessorIdle(bool fIdle); /// /// Adds a new statistics class to track. /// /// /// The statistics we are adding to the scheduler's ListArray for tracking. /// void AddExternalStatistics(ExternalStatistics * pStats) { m_externalThreadStatistics.Add(pStats); } /// /// Saves the statistical information from the retiring virtual processor. /// /// /// The virtual processor that is retiring and whose statistics we are trying to preserve. /// /// /// The reason we use interlocked operation here is because multiple virtual processors can /// be retiring at the same time and the error can be much greater than on a simple increment. /// void SaveRetiredVirtualProcessorStatistics(VirtualProcessor * pVProc) { InterlockedExchangeAdd((volatile long *) &m_enqueuedTaskCounter, pVProc->GetEnqueuedTaskCount()); InterlockedExchangeAdd((volatile long *) &m_dequeuedTaskCounter, pVProc->GetDequeuedTaskCount()); } /// /// Resets the count of work coming in. /// /// /// Previous value of the counter. 
/// unsigned int GetEnqueuedTaskCount() { ULONG currentValue = m_enqueuedTaskCounter; unsigned int retVal = (unsigned int) (currentValue - m_enqueuedTaskCheckpoint); // Update the checkpoint value with the current value m_enqueuedTaskCheckpoint = currentValue; ASSERT(retVal < INT_MAX); return retVal; } /// /// Resets the count of work being done. /// /// /// Previous value of the counter. /// unsigned int GetDequeuedTaskCount() { ULONG currentValue = m_dequeuedTaskCounter; unsigned int retVal = (unsigned int) (currentValue - m_dequeuedTaskCheckpoint); // Update the checkpoint value with the current value m_dequeuedTaskCheckpoint = currentValue; ASSERT(retVal < INT_MAX); return retVal; } /// /// Returns a suballocator from the pool of suballocators in the process, or creates a new one. The RM only allows /// a fixed number of allocators for external contexts in the process, whereas every virtual processor that requests /// an allocator will get one. /// /// /// Specifies whether the allocator is being requested for an external context. If this is 'true' the RM will return /// NULL if it has reached its limit of suballocators for external contexts. If this is 'false', the caller is requesting /// the suballocator for a virtual processor, and the RM *must* allocate one (resources permitting). /// static SubAllocator* GetSubAllocator(bool fExternalAllocator); /// /// Returns a suballocator back to the pool in the RM. The RM caches a fixed number of suballocators and will destroy the /// rest. /// static void ReturnSubAllocator(SubAllocator* pAllocator); /// /// Enqueues a context into m_allContexts /// void AddContext(InternalContextBase * pContext); /// /// Returns the first scheduling node. /// /// /// The iterator position of the returned scheduling node will be placed here. This can only be /// utilized as the pIdx parameter or the idxStart parameter of a GetNextSchedulingNode. 
/// SchedulingNode *GetFirstSchedulingNode(int *pIdx) { *pIdx = 0; return GetNextSchedulingNode(pIdx, -1); } /// /// Returns the next scheduling node in an iteration. /// SchedulingNode *GetNextSchedulingNode(int *pIdx, int idxStart = 0) { int base = *pIdx + (idxStart == -1 ? 0 : 1); int size = m_nodeCount; for (int i = 0; i < size; i++) { int index = (i + base) % size; if (index == idxStart) return NULL; SchedulingNode *pNode = m_nodes[index]; if (pNode != NULL) { *pIdx = index; return pNode; } } return NULL; } /// /// Performs a reference on one shot static items. The caller should CheckOneShotStaticDestruction to remove /// the reference count. /// static LONG ReferenceStaticOneShot() { return InterlockedIncrement(&s_oneShotInitializationState); } /// /// Removes a previous reference on one shot static items. /// static LONG DereferenceStaticOneShot() { return InterlockedDecrement(&s_oneShotInitializationState); } /// /// Called at unload/process exit to perform cleanup of one-shot initialization items. /// static void CheckOneShotStaticDestruction(); /// /// Called when a particular virtual processor reaches a safe point. This function does very little unless there has /// been a change in the version number of the safe point. /// /// /// The safe point marker for a given virtual processor. This is the virtual processor reaching a safe point. /// /// /// An indication of whether a commit should take place. If this is true, the caller should call CommitSafePoints when possible. /// Note that this is a return value so that things like UMS virtual processors can exit critical regions before performing /// the commit (to avoid, for instance, heap locks in critical regions). /// bool MarkSafePoint(SafePointMarker *pMarker); /// /// Called to make a determination of what version of data we can commit up to. This is the minimum data version that all virtual /// processors have observed. 
/// void CommitSafePoints(); /// /// The routine is used to trigger a safe point commit on all the vprocs by /// updating the data version. /// void TriggerCommitSafePoints(SafePointMarker *pMarker); /// /// Determines how long in milliseconds until the next set of threads is allowed to be created. /// ULONG ThrottlingTime(ULONG stepWidth); /// /// Returns the delay before the next thread can be created. /// ULONG ThrottlingDelta() { ULONGLONG curTime = platform::__GetTickCount64(); ULONG delta = (ULONG)(curTime - m_lastThrottledCreateTime); return delta; } /// /// Puts a timestamp on the last time a throttled thread was created. /// void StampThrottledCreate() { m_lastThrottledCreateTime = platform::__GetTickCount64(); } /// /// Returns whether a virtual processor is available. /// bool HasVirtualProcessorAvailable() const { return m_virtualProcessorAvailableCount > 0; } /// /// Returns whether a virtual processor is waiting for throttling. /// bool HasVirtualProcessorPendingThreadCreate() const { return m_virtualProcessorsPendingThreadCreate > 0; } /// /// Returns whether a virtual processor is available to execute new work. /// bool HasVirtualProcessorAvailableForNewWork() const { // // The observational race (lack of atomicity between the two reads) should not matter. If it does in some obscure // case, a new atomic counter can be added. // return (m_virtualProcessorAvailableCount - m_virtualProcessorsPendingThreadCreate) > 0; } /// /// Removes an unreferenced schedule group from the scheduler's list of groups. /// void RemoveScheduleGroup(ScheduleGroupBase *pGroup); /// /// Returns the scheduling node associated with the calling thread, if any. This method only returns a node if the current /// context is an internal context. /// SchedulingNode * FindCurrentNode(); /// /// Returns the scheduling node which pSrcLocation is a member of. Note that if srcLocation and this node's location do not intersect, /// this will return NULL. 
/// SchedulingNode * FindNodeByLocation(location* pSrcLocation); /// /// Returns whether or not a location has a tight binding to an object on this scheduler. /// bool IsLocationBound(const location* pLoc) const { return (pLoc->_GetBindingId() == m_id); } /// /// Returns a bit set for a given location to perform quick masking. /// QuickBitSet GetBitSet(const location* pLoc); /// /// Notifies the scheduler that a given virtual processor is listening for affinity events pertaining to its underlying /// resource. Note that this is a reference counted API. /// /// /// The mask id assigned for a given resource. /// void ListenAffinity(unsigned int maskId) { m_nonAffineResourceListeners.InterlockedSet(maskId); OMTRACE(MTRACE_EVT_LISTENINGTRUE, this, NULL, NULL, maskId); ClearQuickCacheSlot(maskId); } /// /// Notifies the scheduler that a given virtual processor is ignoring messages for affinity events pertaining to its underlying /// resource. Note that this is a reference counted API. /// /// /// The mask id assigned for a given resource. /// void IgnoreAffinity(unsigned int maskId) { m_nonAffineResourceListeners.InterlockedClear(maskId); OMTRACE(MTRACE_EVT_LISTENINGFALSE, this, NULL, NULL, maskId); } /// /// Called when affine work comes into the scheduler, this posts any required notifications to virtual processors which are executing /// non-affine work that they need to stop working on their current group and search for affine work again. /// void PostAffinityMessage(const QuickBitSet& srcMask) { if (srcMask.Intersects(m_nonAffineResourceListeners)) { OMTRACE(MTRACE_EVT_POSTAFFINITYMESSAGE, this, NULL, NULL, srcMask.DbgAcquireBits(0)); m_affinityMessages.InterlockedSet(srcMask & m_nonAffineResourceListeners); } } /// /// Returns whether a given resource id has a message for affinity and, if so, acknowledges it. 
/// bool AcknowledgedAffinityMessage(unsigned int maskId) { bool hasMessage = m_affinityMessages.IsSet(maskId); if (hasMessage) m_affinityMessages.InterlockedClear(maskId); return hasMessage; } /// /// Returns the mask id for a given resource id. /// unsigned int GetResourceMaskId(unsigned int resourceId) { unsigned int val; Hash::ListNode *pNode = m_resourceBitMap.Find(resourceId, &val); ASSERT(pNode != NULL); return val; } /// /// Returns the number of mask ids associated with the scheduler. /// unsigned int GetMaskIdCount() const { return ::Concurrency::GetProcessorCount(); } /// /// Acquires the quick cache slot. /// ScheduleGroupSegmentBase *AcquireQuickCacheSlot(unsigned int maskId) { // // Make **SURE** this is short, sweet, and inlines. // if (m_pCoreAffinityQuickCache[static_cast(maskId) << QUICKCACHEPAD_SHIFT] > reinterpret_cast(1)) { return ActualGetQuickCacheSlot(maskId); } return NULL; } /// /// Clears the quick cache slot. /// void ClearQuickCacheSlot(unsigned int maskId) { if (m_pCoreAffinityQuickCache[static_cast(maskId) << QUICKCACHEPAD_SHIFT] == reinterpret_cast(1)) { InterlockedCompareExchangePointer(reinterpret_cast (m_pCoreAffinityQuickCache + (static_cast(maskId) << QUICKCACHEPAD_SHIFT)), reinterpret_cast (NULL), reinterpret_cast (1)); } } /// /// Clears a given quick cache slot if the slot contains a specific value. /// void ClearQuickCacheSlotIf(unsigned int maskId, ScheduleGroupSegmentBase *pSegment) { if (m_pCoreAffinityQuickCache[static_cast(maskId) << QUICKCACHEPAD_SHIFT] == pSegment) { InterlockedCompareExchangePointer(reinterpret_cast (m_pCoreAffinityQuickCache + (static_cast(maskId) << QUICKCACHEPAD_SHIFT)), reinterpret_cast (NULL), reinterpret_cast (pSegment)); } } /// /// Sets a given quick cache slot. Each execution resource (by mask id) gets a quick cache slot. 
When a work item arrives that is specifically /// affinitized to a given execution resource, the segment containing that work item is stashed in the quick cache slot for the corresponding /// execution resource. This is a fast check which is made repeatedly during search-for-work. This allows a virtual processor which is idle /// searching for work or which is executing non-affine work to quickly snap back to an affine segment without the need for a search. This allows /// more rapid virtual processor spin-up for certain affinity scenarios. /// void SetQuickCacheSlot(unsigned int maskId, ScheduleGroupSegmentBase *pSegment) { if (m_pCoreAffinityQuickCache[static_cast(maskId) << QUICKCACHEPAD_SHIFT] == NULL) { InterlockedCompareExchangePointer(reinterpret_cast (m_pCoreAffinityQuickCache + (static_cast(maskId) << QUICKCACHEPAD_SHIFT)), reinterpret_cast (pSegment), reinterpret_cast (NULL)); } } /// /// Notifies the scheduler that a thread serving a virtual processor with the given mask id is actively searching for work. This /// will prevent other virtual processors from picking up work which is affine to maskId but not affine to the other virtual processor. /// void NotifySearching(unsigned int maskId, bool fSearching) { if (fSearching) { m_idleSearch.InterlockedSet(maskId); OMTRACE(MTRACE_EVT_SEARCHINGTRUE, this, NULL, NULL, maskId); ClearQuickCacheSlot(maskId); } else { m_idleSearch.InterlockedClear(maskId); OMTRACE(MTRACE_EVT_SEARCHINGFALSE, this, NULL, NULL, maskId); } } /// /// Returns whether or not any of the set of virtual processors represented by bitSet is searching for work. /// bool HasSearchers(const QuickBitSet& bitSet) const { return m_idleSearch.Intersects(bitSet); } /// /// Checks whether a periodic scan is necessary, and if so, performs it. /// void PeriodicScan(ULONGLONG serviceTime) { // // Right now, we only perform livelock service scan every 2 seconds. 
// if (serviceTime - m_lastServiceScan > 2000) PerformServiceScan(serviceTime); } /// /// Increments the count of active resources by a given resource's mask id. /// void IncrementActiveResourcesByMask(unsigned int maskId) { m_activeSet.InterlockedSet(maskId); } /// /// Decrements the count of active resources by a given resource's mask id. /// void DecrementActiveResourcesByMask(unsigned int maskId) { m_activeSet.InterlockedClear(maskId); } //************************************************** // // TRANSITION: This is temporary until such time as we can hook into priority to solve livelock issues. // bool HasPriorityObjects() const { return !m_priorityObjects.Empty(); } BoostedObject *GetNextPriorityObject(); void RemovePrioritizedObject(BoostedObject *pEntry); // // TRANSITION: End of temporary section // //************************************************** protected: SchedulerPolicy m_policy; // scheduler policy fields ::Concurrency::SchedulerType m_schedulerKind; ::Concurrency::SchedulingProtocolType m_schedulingProtocol; unsigned short m_localContextCacheSize; // The total number of virtual processors in the scheduler, not including oversubscribed virtual processors. // This number is adjusted as dynamic RM adds and removes cores. volatile LONG m_virtualProcessorCount{}; // The default scheduler static SchedulerBase* s_pDefaultScheduler; static _StaticLock s_defaultSchedulerLock; // The default scheduler policy static SchedulerPolicy* s_pDefaultSchedulerPolicy; // TLS data static DWORD t_dwContextIndex; DWORD m_dwExternalStatisticsIndex; // // NOTE: Must cleanup up m_nodes before m_rings // NumaInformation* m_numaInformation{}; SchedulingNode** m_nodes{}; SchedulingRing** m_rings{}; int m_numaCount{}; int m_nodeCount{}; // // The active set of virtual processors on this scheduler. // ReferenceCountedQuickBitSet m_activeSet; // // Tracking for virtual processors which need messages of notification for affine work scheduling, etc... 
// ReferenceCountedQuickBitSet m_idleSearch; ReferenceCountedQuickBitSet m_nonAffineResourceListeners; QuickBitSet m_affinityMessages; // // Quick cache for core affine tasks. // ScheduleGroupSegmentBase* volatile * m_pCoreAffinityQuickCache{}; // The list of schedule groups within the scheduler. Note that while groups are owned by the scheduler, a group is merely // a collection of segments where the individual segments are owned by scheduling rings. This allows groups with affinity applied // as well as separation of work within a group by which node scheduled it. ListArray m_scheduleGroups; // The single anonymous schedule group for the scheduler. The anonymous schedule group will have one segment per ring. ScheduleGroupBase *m_pAnonymousScheduleGroup{}; // Lock free list of all internal contexts in the scheduler LockFreePushStack m_allContexts; SafeRWList m_finalEvents; // A list array that keeps statistical information for all non-internal contexts ListArray m_externalThreadStatistics; // Lock that guards the data structures for tracking context exit events. _NonReentrantBlockingLock m_listArrayDeletionLock; /// /// Activate the given virtual processor /// void ActivateVirtualProcessor(VirtualProcessor *pVirtualProcessor, ScheduleGroupBase *pGroup); /// /// Returns a newly constructed internal context appropriate to the given type of scheduler. /// virtual InternalContextBase *CreateInternalContext() =0; /// /// Increments the reference counts required by a scheduler attach. /// void ReferenceForAttach(); /// /// Decrements the reference counts incremented for scheduler attach. /// void ReleaseForDetach(); /// /// Returns a current number of active virtual processors for this scheduler /// /// /// Returns a current number of active virtual processors for this scheduler. No error state. 
/// unsigned int GetNumberOfActiveVirtualProcessors() const { return m_activeVProcCount; }; /// /// Notification after a virtual processor goes from INACTIVE to ACTIVE or ACTIVE to INACTIVE /// /// /// True if a virtual processor is going from INACTIVE to ACTIVE, and false if it is going from ACTIVE to INACTIVE. /// /// /// Active virtual processor count after the transition /// virtual void VirtualProcessorActiveNotification(bool fActive, LONG activeCount) { (fActive); (activeCount); } /// /// Indicates the type of work which exists within the scheduler. /// enum PendingWorkType { /// /// No work exists within the scheduler. /// NoWork, /// /// There is user work within the scheduler (chores, tasks, blocked contexts, etc...). There may or may not /// be ancillary work. /// UserWork, /// /// There is ancillary work related to the scheduler (e.g.: queued timers for throttling, etc...) /// OnlyAncillaryWork }; /// /// Determines if there is pending work such as blocked context/unstarted chores etc in the /// scheduler. If there is no pending work, the scheduler will attempt to shutdown. /// virtual PendingWorkType TypeOfWorkPending(); /// /// Initialize scheduler event handlers/background threads /// virtual void InitializeSchedulerEventHandlers(); /// /// Destroy scheduler event handlers/background threads /// virtual void DestroySchedulerEventHandlers(); /// /// Cancel all the internal contexts. /// virtual void CancelAllContexts(); /// /// Returns the count of bound contexts on the scheduler. /// ULONG GetNumberOfBoundContexts() const { return (ULONG)m_boundContextCount; } // Implementation for IScheduler interface APIs that is shared between to all derived classes. /// /// Called by the resource manager in order to gather statistics for a given scheduler. The statistics gathered here /// will be used to drive dynamic feedback with the scheduler to determine when it is appropriate to assign more resources /// or take resources away. 
    /// Note that these counts can be optimistic and do not necessarily have to reflect the current
    /// count with 100% synchronized accuracy.
    ///
    /// pTaskCompletionRate:
    ///     The number of tasks which have been completed by the scheduler since the last call to the Statistics method.
    ///
    /// pTaskArrivalRate:
    ///     The number of tasks that have arrived in the scheduler since the last call to the Statistics method.
    ///
    /// pNumberOfTasksEnqueued:
    ///     The total number of tasks in all scheduler queues.
    ///
    void Statistics(unsigned int *pTaskCompletionRate, unsigned int *pTaskArrivalRate, unsigned int *pNumberOfTasksEnqueued);

    ///
    /// Called when the resource manager is giving virtual processors to a particular scheduler. The virtual processors are
    /// identified by an array of IVirtualProcessorRoot interfaces. This call is made to grant virtual processor roots
    /// at initial allocation during the course of ISchedulerProxy::RequestInitialVirtualProcessors, and during dynamic
    /// core migration.
    ///
    /// ppVirtualProcessorRoots:
    ///     An array of IVirtualProcessorRoot interfaces representing the virtual processors being added to the scheduler.
    ///
    /// count:
    ///     Number of IVirtualProcessorRoot interfaces in the array.
    ///
    void AddVirtualProcessors(IVirtualProcessorRoot **ppVirtualProcessorRoots, unsigned int count);

    ///
    /// Called when the resource manager is taking away virtual processors from a particular scheduler. The scheduler should
    /// mark the supplied virtual processors such that they are removed asynchronously and return immediately. Note that
    /// the scheduler should make every attempt to remove the virtual processors as quickly as possible as the resource manager
    /// will reaffinitize threads executing upon them to other resources. Delaying stopping the virtual processors may result
    /// in unintentional oversubscription within the scheduler.
    ///
    /// ppVirtualProcessorRoots:
    ///     An array of IVirtualProcessorRoot interfaces representing the virtual processors which are to be removed.
    ///
    /// count:
    ///     Number of IVirtualProcessorRoot interfaces in the array.
    ///
    void RemoveVirtualProcessors(IVirtualProcessorRoot **ppVirtualProcessorRoots, unsigned int count);

    ///
    /// Invoked when the Gate Count goes to zero as a result of virtual processor state transitions, while the
    /// scheduler has been marked for shutdown. It proceeds to sweep the scheduler if it can set the suspend flag
    /// on the shutdown gate while the gate count is still 0 and the scheduler is marked for shutdown.
    ///
    void AttemptSchedulerSweep();

    ///
    /// Returns whether the reserved context pool can be utilized to fetch contexts to bypass throttling.
    ///
    virtual bool AllowGeneralFetchOfReservedContexts()
    {
        return true;
    }

private:
    friend class ContextBase;
    friend class ::Concurrency::CurrentScheduler;
    friend class ScheduleGroupBase;
    friend class ScheduleGroupSegmentBase;
    friend class FairScheduleGroup;
    friend class CacheLocalScheduleGroup;
    friend class InternalContextBase;
    friend class ExternalContextBase;
    friend class VirtualProcessor;
    friend class SchedulingRing;
    friend class SchedulingNode;
    friend class SafePointInvocation;

    //
    // TRANSITION: This is a temporary patch for livelock prevention until we can hook into priority.
    // TRANSITION: This **MUST** have a hyper lock on it.
    //
    // NOTE(review): template argument list lost in this copy -- restore from source control.
    SafeRWList m_priorityObjects;

    // The list of invocations for safe point registrations.
    // NOTE(review): template argument list lost in this copy -- restore from source control.
    SafeSQueue m_safePointInvocations;

    // Counter used to assign unique identifiers to contexts.
    volatile LONG m_contextIdCounter;

    // Counter used to assign unique identifiers to schedule groups.
    volatile LONG m_scheduleGroupIdCounter;

    // Counter used to assign unique identifiers to work queues.
    static volatile LONG s_workQueueIdCounter;

    // The current safe point version for data. This indicates the newest data requiring observation by all virtual processors
    volatile ULONG m_safePointDataVersion;

    // The current safe point commit version. This indicates the newest data that has been observed by all virtual processors
    volatile ULONG m_safePointCommitVersion;

    // The pending version that is being committed by one of the vprocs.
    volatile ULONG m_safePointPendingVersion;

    // Hash tables for conversion
    // NOTE(review): template argument lists lost in this copy -- restore from source control.
    Hash m_resourceNodeMap;
    Hash m_resourceBitMap;

    // scheduler id
    unsigned int m_id;

    // Round-robin index for scheduling ring.
    unsigned int m_nextSchedulingRingIndex;

    // Handle to a semaphore used to synchronize during scheduler finalization.
    HANDLE m_hSchedulerShutdownSync{};

    //
    // Reference counts:
    //
    //     m_refCount -- The externally visible reference count on the scheduler. Incremented for attachment
    //                   and for explicit calls to Reference. When this reference count falls to zero, the
    //                   scheduler initiates shutdown. When m_internalContextCount falls to zero, the
    //                   scheduler finalizes.
    //
    //     m_attachCount -- The count of external contexts to which this scheduler is attached. This is primarily
    //                      present for debugging purposes.
    //
    //     m_internalContextCountPlusOne -- The count of internal contexts on the scheduler plus one. Note that
    //                                      it's +1 to explicitly handle any possibility of scheduler shutdown
    //                                      before internal contexts are created.
    //
    //     m_boundContextCount -- The count of internal contexts which are currently bound. This affects how the scheduler
    //                            throttles thread creation.
    //
    volatile LONG m_refCount;
    volatile LONG m_attachCount;
    volatile LONG m_internalContextCountPlusOne;
    volatile LONG m_initialReference;
    volatile LONG m_boundContextCount;

    //
    // The virtual processor shutdown gate. This is used to implement scheduler shutdown, by ensuring a handshake
    // when all virtual processors go idle. When such happens, no virtual processor may go active again without
    // handshaking. During the period between handshakes, the scheduler is free to sweep schedule groups
    // to detect whether finalization is yet appropriate.
    //
    // Layout:
    //     31 - SHUTDOWN_INITIATED_FLAG -- indicates that the external reference count on the scheduler has fallen to zero,
    //                                     and the scheduler should be able to finalize when all work queued to it has
    //                                     completed. This flag may be reset at a later point if an internal context
    //                                     ends up resurrecting the scheduler.
    //     30 - SUSPEND_GATE_FLAG -- indicates a suspend phase while the scheduler is trying to evaluate whether
    //                               it is ready to finalize. A scheduler may find blocked contexts during this
    //                               phase and back off from finalization, resetting the flag. No contexts are allowed
    //                               to execute work during this phase, and no new virtual processors may be added
    //                               to the scheduler while this bit is set.
    //     29 - SHUTDOWN_COMPLETED_FLAG -- indicates that the scheduler has completed shutdown. This is the point of no
    //                                     return, for this scheduler. At this point no work should exist in the scheduler,
    //                                     and attempts to add any new virtual processors will fail, since the scheduler
    //                                     is about to be destroyed.
    //
    volatile LONG m_vprocShutdownGate;

    // An indication of whether we have done a sweep without actual work.
    volatile LONG m_fSweepWithoutActualWork;

    // An indication of how long it has been since the last sweep for livelocked segments.
    volatile ULONGLONG m_lastServiceScan;

    static _StaticLock s_schedulerLock;
    static LONG s_initializedCount;

    //
    // The one shot initialization state has two parts, a reference count occupying the lower 31 bits and a flag indicating whether
    // one shot initialization was performed in the top bit.
    //
    static LONG s_oneShotInitializationState;

    IResourceManager *m_pResourceManager;
    ISchedulerProxy *m_pSchedulerProxy{};

    // The count of virtual processors active in the scheduler.
    volatile LONG m_activeVProcCount;

    // The number of virtual processors available to schedule more work.
    // This does *NOT* take into account those virtual processors which are *inactive pending thread*
    volatile LONG m_virtualProcessorAvailableCount{};

    // The number of virtual processors available pending a thread creation.
    volatile LONG m_virtualProcessorsPendingThreadCreate;

    // Statistics data counters
    volatile ULONG m_enqueuedTaskCounter;
    volatile ULONG m_dequeuedTaskCounter;

    // Statistics data checkpoints
    ULONG m_enqueuedTaskCheckpoint;
    ULONG m_dequeuedTaskCheckpoint;

    //
    // Throttling information:
    //
    ULONG m_threadsBeforeThrottling;
    ULONGLONG m_lastThrottledCreateTime;
    HANDLE m_hThrottlingTimer;
    volatile LONG m_pendingDeferredCreates;

    // NOTE(review): the template argument lists of the LockFreeStack members below were lost
    // in this copy -- restore from source control.

    // Free list of internal contexts.
    LockFreeStack m_internalContextPool;

    // Free list of external contexts.
    LockFreeStack m_externalContextPool;

    // Free list of realized chores.
    LockFreeStack m_realizedChorePool;

    // List of reserved contexts
    LockFreeStack m_reservedContexts;

    // A stack that holds free suballocators.
    static LockFreeStack s_subAllocatorFreePool;

    // Number of suballocators for use by external contexts that are active in the process.
    static volatile LONG s_numExternalAllocators;

    // The max number of external contexts that could have suballocators at any given time.
    static const int s_maxExternalAllocators;

    // The maximum depth of the free pool of allocators.
    static const int s_allocatorFreePoolLimit;

    static void CheckStaticConstruction();
    static void StaticConstruction();
    static void StaticDestruction();
    static void OneShotStaticConstruction();
    static void OneShotStaticDestruction();

    void Initialize();
    void Cleanup();

    int GetValidSchedulingRingIndex(int idx);
    int GetNextValidSchedulingRingIndex(int idx);

    ///
    /// Creates the correct type of virtual processor.
    ///
    virtual VirtualProcessor *CreateVirtualProcessor(SchedulingNode *pOwningNode, IVirtualProcessorRoot *pOwningRoot) = 0;

    ///
    /// Creates an external context and attaches it to the calling thread. Called when a thread attaches to a scheduler.
    ///
    ExternalContextBase *AttachExternalContext(bool explicitAttach);

    ///
    /// Detaches an external context from the scheduler it is attached to. Called when an external context actively detaches
    /// from a scheduler, or when the underlying thread for an implicitly attached external context exits.
    ///
    /// pContext:
    ///     The external context being detached.
    ///
    /// explicitDetach:
    ///     Whether this was the result of an explicit detach or the thread exiting.
    ///
    void DetachExternalContext(ExternalContextBase* pContext, bool explicitDetach);

    ///
    /// Gets an external context from the idle pool, creating a new one if the idle pool is empty.
    ///
    ExternalContextBase *GetExternalContext(bool explicitAttach);

    ///
    /// Releases an external context to the scheduler's idle pool, destroying it if the idle pool is full.
    ///
    void ReleaseExternalContext(ExternalContextBase *pContext);

    ///
    /// Increments the reference count to the scheduler but does not allow a 0 to 1 transition. This API should
    /// be used to safely access a scheduler when the scheduler is not 'owned' by the caller.
    ///
    /// Returns:
    ///     True if the scheduler was referenced, false, if the reference count was 0.
    ///
    bool SafeReference();

    ///
    /// Returns the default scheduler creating one if it doesn't exist.
    ///
    /// Returns:
    ///     A pointer to the default scheduler
    ///
    static SchedulerBase* GetDefaultScheduler();

    //
    // Finalization:
    //

    ///
    /// Called to initiate shutdown of the scheduler. This may directly proceed to phase two of shutdown (actively
    /// shutting down internal contexts) or it may wait for additional events (e.g.: all work to complete) before
    /// proceeding to phase two.
    ///
    void PhaseOneShutdown();

    ///
    /// Actively informs all internal contexts to exit and breaks them out of their dispatch loops. When the last
    /// internal context dies, finalization will occur and we move to SchedulerBase::Finalize().
    ///
    void PhaseTwoShutdown();

    ///
    /// Performs finalization of the scheduler deleting all structures, etc... This will also notify any listeners
    /// that the scheduler has actively shut down.
    ///
    void Finalize();

    ///
    /// Once all virtual processors are idle, the scheduler calls this routine which performs a full sweep through all
    /// schedule groups looking for work. If work is found (even a blocked context), the scheduler backs off finalization;
    /// otherwise, it proceeds by asking all virtual processors for final check-in.
    ///
    void SweepSchedulerForFinalize();

    ///
    /// Releases virtual processors that were suspended on the shutdown gate, while trying to go from IDLE to
    /// ACTIVE when the finalization sweep was in progress.
    ///
    /// releaseCount:
    ///     Number of virtual processors that need to be released.
    ///
    void ReleaseSuspendedVirtualProcessors(LONG releaseCount);

    ///
    /// Called during scheduler finalization, after all virtual processors are suspended to check if any chores still
    /// exist in the scheduler. The calling thread is the only thread active in the scheduler at the time the function
    /// is called.
    ///
    /// Returns:
    ///     A boolean value indicating whether any unstarted chores (realized or unrealized) were found.
    ///
    bool FoundUnstartedChores();

    ///
    /// Called during scheduler finalization, before all virtual processors are suspended to check if any blocked
    /// contexts exist in the scheduler.
    ///
    /// Returns:
    ///     A boolean value indicating whether any blocked contexts were found.
    ///
    bool FoundBlockedContexts();

    ///
    /// Called to perform a resurrection of the scheduler. When the scheduler reference count has fallen to zero,
    /// it's possible there's still work on the scheduler and that one of those work items will perform an action
    /// leading to additional reference. Such bringing of the reference count from zero to non-zero is only legal
    /// on an *INTERNAL* context and immediately halts shutdown.
    ///
    void Resurrect();

    ///
    /// Called to perform a commit of safe-point registrations up to **AND INCLUDING** a particular version.
    ///
    /// commitVersion:
    ///     The data version that we commit to.
/// A version of zero indicates a full commit.
    ///
    void CommitToVersion(ULONG commitVersion);

    ///
    /// Returns the commit version for safe points within the scheduler.
    ///
    ULONG ComputeSafePointCommitVersion();

    ///
    /// Updates and returns the pending version for safe point commits.
    /// If there are no commits pending, 0 is returned.
    ///
    ULONG UpdatePendingVersion();

    ///
    /// Updates the commit version to the given version and returns
    /// the pending commit version. If there are no commits pending, 0 is returned.
    ///
    /// commitVersion:
    ///     The version up to which safe points have been committed.
    ///
    ULONG UpdateCommitVersion(ULONG commitVersion);

    ///
    /// Returns whether a particular version number is visible to us yet. Versions at the wrap-around point
    /// are not visible until we commit the wrap.
    ///
    bool IsVisibleVersion(ULONG version)
    {
        return (version >= m_safePointCommitVersion);
    }

    ///
    /// Returns the version we are allowed to see from an observation. This handles wrap around.
    ///
    ULONG ObservedVersion(ULONG version)
    {
        return (IsVisibleVersion(version) ? version : ULONG_MAX);
    }

    ///
    /// Publishes a new data version and returns the version number.
    ///
    ULONG PublishNewDataVersion()
    {
        // NOTE(review): the target types of the reinterpret_casts below were stripped in the copy
        // under review; restored as <volatile LONG *> to match InterlockedIncrement's signature.
        ULONG dataVersion = InterlockedIncrement(reinterpret_cast<volatile LONG *>(&m_safePointDataVersion));

        //
        // Zero and ULONG_MAX are special keys used to handle wrap-around in the version counters. The commit counter may never be either of these values due
        // to a data version being them.
        //
        while (dataVersion == 0 || dataVersion == ULONG_MAX)
            dataVersion = InterlockedIncrement(reinterpret_cast<volatile LONG *>(&m_safePointDataVersion));

        return dataVersion;
    }

    ///
    /// Registers a callback at the next safe point after this function call. This should never be directly used by clients.
    /// SafePointInvocation::Register(...) should be used instead.
    ///
    /// pInvocation:
    ///     The invocation object which is being registered.
///
    void InvokeOnSafePoint(SafePointInvocation *pInvocation);

    ///
    /// Send a scheduler ETW event
    ///
    void TraceSchedulerEvent(ConcRT_EventType eventType, UCHAR level, unsigned int schedulerId)
    {
        // Only pay for the event marshaling when the provider is actually enabled.
        if (g_TraceInfo._IsEnabled(level, SchedulerEventFlag))
            ThrowSchedulerEvent(eventType, level, schedulerId);
    }

    ///
    /// Changes the due time for dispatching new threads
    ///
    void ChangeThrottlingTimer(ULONG dueTime);

    ///
    /// Acts as a trampoline between the event wait and the timer wait as we cannot queue the timer in DeferredGetInternalContext
    /// due to limitations on what Win32 APIs can be called from a UMS primary.
    ///
    static void CALLBACK ThrottlerTrampoline(PVOID pData, BOOLEAN waitOrTimerFired);

    ///
    /// Creates new contexts.
    ///
    void ThrottlerDispatch();

    ///
    /// Called to notify the scheduler that a context is available from the throttling manager / background creation.
    ///
    /// Returns:
    ///     An indication of whether a virtual processor was awoken due to the context being utilized.
    ///
    bool NotifyThrottledContext(InternalContextBase *pContext);

    ///
    /// Create a schedule group within this scheduler.
    ///
    /// pPlacement:
    ///     A pointer to a location where tasks within the schedule group will be biased towards executing at.
    ///
    /// Returns:
    ///     A pointer to a newly created schedule group.
    ///
    ScheduleGroup* InternalCreateScheduleGroup(location* pPlacement);

    ///
    /// Internal claim of a quick cache slot.
    ///
    ScheduleGroupSegmentBase *ActualGetQuickCacheSlot(unsigned int maskId)
    {
        // NOTE(review): the template arguments of the casts in this method were stripped in the
        // copy under review; restored per the slot's declared element type -- confirm against
        // source control. The value 1 is used as a sentinel marking a claimed slot (which is why
        // the comparison below is '> 1' rather than '!= NULL').
        ScheduleGroupSegmentBase *pSegment = m_pCoreAffinityQuickCache[static_cast<size_t>(maskId) << QUICKCACHEPAD_SHIFT];
        if (pSegment > reinterpret_cast<ScheduleGroupSegmentBase *>(1))
        {
            ScheduleGroupSegmentBase *pXchgSegment = reinterpret_cast<ScheduleGroupSegmentBase *>(
                InterlockedCompareExchangePointer(reinterpret_cast<PVOID volatile *>(m_pCoreAffinityQuickCache + (static_cast<size_t>(maskId) << QUICKCACHEPAD_SHIFT)),
                                                  reinterpret_cast<PVOID>(1),
                                                  reinterpret_cast<PVOID>(pSegment))
                );

            // Only the thread whose compare-exchange succeeded owns the segment.
            if (pSegment == pXchgSegment)
                return pSegment;
        }

        return NULL;
    }

    ///
    /// Performs the scheduler service scan.
    ///
    void PerformServiceScan(ULONGLONG serviceTime);

    ///
    /// A simple bridge to ThrottlerDispatch. This bridge is used for Vista and up (except MSDK)
    ///
    static void CALLBACK ThrottlerDispatchBridge(PTP_CALLBACK_INSTANCE, void * pContext, PTP_TIMER)
    {
        ThrottlerDispatchBridgeXP(pContext, true);
    }

    ///
    /// A simple bridge to ThrottlerDispatch. This bridge is used for XP and MSDK
    ///
    static void CALLBACK ThrottlerDispatchBridgeXP(PVOID pScheduler, BOOLEAN)
    {
        // NOTE(review): cast target type restored (stripped in the copy under review).
        reinterpret_cast<SchedulerBase *>(pScheduler)->ThrottlerDispatch();
    }

    static void ThrowSchedulerEvent(ConcRT_EventType eventType, UCHAR level, unsigned int schedulerId);

    // Hide assignment operator and copy constructor
    SchedulerBase const &operator =(SchedulerBase const &);
    SchedulerBase(SchedulerBase const &);
};

} // namespace details

} // namespace Concurrency