QueryEnd(bool userInitiatedDispose) : void
Parameter: userInitiatedDispose | bool | true when this call originates from the consumer's own Dispose()
Returns | void |
// Ends the query: waits for the root task, disposes it, and translates any failure or
// cancellation into the exception (if any) that this caller should observe. Idempotent —
// only the first caller (guarded by Interlocked on _alreadyEnded) performs the work.
internal void QueryEnd(bool userInitiatedDispose)
{
    Debug.Assert(_rootTask != null);
    //Debug.Assert(Task.Current == null || (Task.Current != _rootTask && Task.Current.Parent != _rootTask));

    // Only the first thread to arrive here runs the shutdown logic; later callers fall through.
    if (Interlocked.Exchange(ref _alreadyEnded, 1) == 0)
    {
        // There are four cases:
        // Case #1:  Wait produced an exception that is not OCE(ct), or an AggregateException which is not full of OCE(ct) ==> We rethrow.
        // Case #2:  External cancellation has been requested ==> we'll manually throw OCE(externalToken).
        // Case #3a: We are servicing a call to Dispose() (and possibly also external cancellation has been requested).. simply return.
        // Case #3b: The enumerator has already been disposed (and possibly also external cancellation was requested). Throw an ODE.
        // Case #4:  No exceptions or explicit call to Dispose() by this caller ==> we just return.

        // See also "InlinedAggregationOperator" which duplicates some of this logic for the aggregators.
        // See also "QueryOpeningEnumerator" which duplicates some of this logic.
        // See also "ExceptionAggregator" which duplicates some of this logic.

        try
        {
            // Wait for all the tasks to complete.
            // If any of the tasks ended in the Faulted state, an AggregateException will be thrown.
            _rootTask.Wait();
        }
        catch (AggregateException ae)
        {
            AggregateException flattenedAE = ae.Flatten();
            bool allOCEsOnTrackedExternalCancellationToken = true;
            for (int i = 0; i < flattenedAE.InnerExceptions.Count; i++)
            {
                OperationCanceledException oce = flattenedAE.InnerExceptions[i] as OperationCanceledException;

                // We only treat an inner exception as "external cancellation" iff:
                //   it is a non-null OCE, its token matches the exact external token we were given,
                //   and that external token is actually canceled (i.e. not a spoof OCE(extCT) for a non-canceled extCT).
                if (oce == null ||
                    !oce.CancellationToken.IsCancellationRequested ||
                    oce.CancellationToken != _cancellationState.ExternalCancellationToken)
                {
                    allOCEsOnTrackedExternalCancellationToken = false;
                    break;
                }
            }

            // If all the exceptions were OCE(externalToken), then we will propagate only a single OCE(externalToken) below;
            // otherwise, we rethrow the flattened aggregate (already aggregated by the Wait above).
            if (!allOCEsOnTrackedExternalCancellationToken)
                throw flattenedAE; // Case #1
        }
        finally
        {
            // _rootTask doesn't support Dispose on some platforms, hence the 'as' probe.
            IDisposable disposable = _rootTask as IDisposable;
            if (disposable != null)
                disposable.Dispose();
        }

        if (_cancellationState.MergedCancellationToken.IsCancellationRequested)
        {
            // Cancellation has occurred but no user-delegate exceptions were detected
            // (they would have been rethrown above).

            // NOTE: it is important that we see other state variables correctly here, and that
            // read-reordering hasn't played havoc.
            // This is OK because
            //   1. all the state writes (e.g. in the Initiate* methods) are volatile writes (standard .NET MM)
            //   2. tokenCancellationRequested is backed by a volatile field, hence the reads below
            //      won't get reordered about the read of token.IsCancellationRequested.

            // If the query has already been disposed, we don't want to throw an OCE.
            if (!_cancellationState.TopLevelDisposedFlag.Value)
            {
                CancellationState.ThrowWithStandardMessageIfCanceled(_cancellationState.ExternalCancellationToken); // Case #2
            }

            // Otherwise, given that there were no user-delegate exceptions,
            // the only remaining situation is user-initiated dispose.
            Debug.Assert(_cancellationState.TopLevelDisposedFlag.Value);

            // If we aren't actively disposing, that means somebody else previously disposed
            // of the enumerator. We must throw an ObjectDisposedException.
            if (!userInitiatedDispose)
            {
                throw new ObjectDisposedException("enumerator", SR.PLINQ_DisposeRequested); // Case #3
            }
        }

        // Case #4: nothing to do.
    }
}
}
}
//-----------------------------------------------------------------------------------
// Creates and begins execution of a new spooling task. Executes synchronously,
// and by the time this API has returned all of the results have been produced.
//
// Arguments:
//     groupState    - values for inter-task communication
//     partitions    - the producer enumerators
//     channels      - the producer-consumer channels
//     taskScheduler - the task manager on which to execute
//

internal static void SpoolStopAndGo<TInputOutput, TIgnoreKey>(
    QueryTaskGroupState groupState, PartitionedStream<TInputOutput, TIgnoreKey> partitions,
    SynchronousChannel<TInputOutput>[] channels, TaskScheduler taskScheduler)
{
    Debug.Assert(partitions.PartitionCount == channels.Length);
    Debug.Assert(groupState != null);

    // All producer tasks in this query are parented under a single common root.
    Task queryRoot = new Task(() =>
    {
        // A stop-and-go merge uses the current thread for one task and then blocks before
        // returning to the caller, until all results have been accumulated. We do this by
        // running the last partition inline on the calling thread, so only the first
        // (PartitionCount - 1) partitions are scheduled asynchronously.
        int lastPartition = partitions.PartitionCount - 1;

        for (int partitionIndex = 0; partitionIndex < lastPartition; partitionIndex++)
        {
            TraceHelpers.TraceInfo("SpoolingTask::Spool: Running partition[{0}] asynchronously", partitionIndex);

            QueryTask backgroundProducer = new StopAndGoSpoolingTask<TInputOutput, TIgnoreKey>(
                partitionIndex, groupState, partitions[partitionIndex], channels[partitionIndex]);
            backgroundProducer.RunAsynchronously(taskScheduler);
        }

        TraceHelpers.TraceInfo("SpoolingTask::Spool: Running partition[{0}] synchronously", lastPartition);

        // The final producer runs on the current thread, blocking until it is done.
        QueryTask inlineProducer = new StopAndGoSpoolingTask<TInputOutput, TIgnoreKey>(
            lastPartition, groupState, partitions[lastPartition], channels[lastPartition]);
        inlineProducer.RunSynchronously(taskScheduler);
    });

    // Begin the query on the calling thread.
    groupState.QueryBegin(queryRoot);

    // We don't want to return until the task is finished. Run it on the calling thread.
    rootTaskRun(queryRoot, taskScheduler, groupState);

    static void rootTaskRun(Task root, TaskScheduler scheduler, QueryTaskGroupState state)
    {
        root.RunSynchronously(scheduler);

        // Wait for the query to complete, propagate exceptions, and so on.
        // For pipelined queries, this step happens in the async enumerator.
        state.QueryEnd(false);
    }
}