C# Client Library
A C# Client Library for the AnalyzeRe REST API
LargeDataUpload_ExtensionMethods.cs
Go to the documentation of this file.
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Net;
using System.Threading;

using AnalyzeRe.Rest;
using RestSharp;

// ReSharper disable once CheckNamespace (Extension methods classes must be on root namespace)
namespace AnalyzeRe
{
    /// <summary>Extension methods for uploading large data files to a LargeDataSubResource.</summary>
    public static class LargeDataUpload_ExtensionMethods
    {
        /// <summary>The default buffer size (in bytes) used when reading from a stream.</summary>
        public const int DefaultBufferSize = 4096;
        #region Extension Methods
        #region Asynchronous Invocations

        /// <summary>Upload a file to this data endpoint asynchronously on a background thread.</summary>
        // (Signature reconstructed: the method name is assumed from its synchronous counterpart,
        // as the original declaration lines were collapsed in this listing.)
        public static void LargeFileUploadAsync(this LargeDataSubResource dataEndpoint,
            string filePath, AsyncParameters parameters = null)
        {
            dataEndpoint.ValidateOwnerHasId();
            parameters = parameters ?? new AsyncParameters();
            new Thread(() =>
            {
                try
                {
                    using (FileStream fs = Utils.OpenReadOnlyFileStream(filePath))
                    {
                        DoLargeStreamUpload(dataEndpoint, fs, parameters);
                    }
                }
                catch (Exception ex)
                {
                    parameters.OnError?.Invoke(ex);
                }
            }).Start();
        }

        /// <summary>Upload a stream to this data endpoint asynchronously on a background thread.</summary>
        // (Signature reconstructed: the method name is assumed from its synchronous counterpart.)
        public static void LargeStreamUploadAsync(this LargeDataSubResource dataEndpoint,
            Stream dataStream, AsyncParameters parameters = null, long? dataSize = null)
        {
            dataEndpoint.ValidateOwnerHasId();
            parameters = parameters ?? new AsyncParameters();
            new Thread(() =>
            {
                try
                {
                    DoLargeStreamUpload(dataEndpoint, dataStream, parameters, dataSize);
                }
                catch (Exception ex)
                {
                    parameters.OnError?.Invoke(ex);
                }
            }).Start();
        }
        #endregion Asynchronous Invocations

        #region Synchronous Invocations

        /// <summary>Upload a file to this data endpoint, blocking until the upload completes.</summary>
        public static void LargeFileUpload(
            this LargeDataSubResource dataEndpoint, string filePath, Parameters parameters = null)
        {
            dataEndpoint.ValidateOwnerHasId();
            using (FileStream fs = Utils.OpenReadOnlyFileStream(filePath))
            {
                LargeStreamUpload(dataEndpoint, fs, parameters);
            }
        }

        /// <summary>Upload a stream to this data endpoint, blocking until the upload completes.</summary>
        public static void LargeStreamUpload(
            this LargeDataSubResource dataEndpoint, Stream dataStream,
            Parameters parameters = null, long? dataSize = null)
        {
            dataEndpoint.ValidateOwnerHasId();
            // Wrap any user-supplied parameters in our own to handle errors synchronously.
            // (The wrapping statement was partially collapsed in the original listing;
            // the local variable name is assumed.)
            AsyncParameters wrapped = new AsyncParameters(parameters,
                onError: e =>
                {
                    // Make sure to call the original OnError callback first if there is one.
                    (parameters as AsyncParameters)?.OnError?.Invoke(e);
                    throw e;
                });
            DoLargeStreamUpload(dataEndpoint, dataStream, wrapped, dataSize);
        }
        #endregion Synchronous Invocations
        #endregion Extension Methods

        #region Segmented Upload Implementation

        // The core implementation shared by the public upload methods above.
        // (Signature reconstructed: the method name is assumed, as the original
        // declaration lines were collapsed in this listing.)
        private static void DoLargeStreamUpload(LargeDataSubResource dataEndpoint,
            Stream dataStream,
            AsyncParameters parameters,
            long? dataSize = null)
        {
            // Verify that all required parameters are set to non-null values.
            if (dataEndpoint == null) throw new ArgumentNullException(nameof(dataEndpoint));
            if (dataStream == null) throw new ArgumentNullException(nameof(dataStream));
            if (parameters == null) throw new ArgumentNullException(nameof(parameters));

            // Get the size of the file.
            // If no file size was supplied, derive it from the stream's properties.
            // If no file size is available, the server cannot validate that the entire file was received.
            long? fileSize = dataSize ?? DetermineStreamLength(dataStream);

            // ReSharper disable once RedundantAssignment (debug mode requires this assignment)
            DateTime start = DateTime.UtcNow;
            if (Utils.EnableDebugLogging)
            {
                Debug.WriteLine($"{start.ToString(API.DebugTimeFormat)} " +
                    $"{dataEndpoint.Owner.GetType().NiceTypeName()} Large File Upload Initiated " +
                    $"({(fileSize.HasValue ? fileSize + " bytes)" : "unknown size)")}");
                // Allows for extensive debugging of large file upload progress.
                // (Hooking this event is expensive, so don't do it in Release mode.)
                void OnProgressUpdate(StatusResponse s)
                {
                    DateTime now = DateTime.UtcNow;
                    Debug.WriteLine($"{now.ToString(API.DebugTimeFormat)} Upload Progress Update. " +
                        $"Time Elapsed: {(now - start).TotalMilliseconds}ms, " +
                        $"Status: {s.status} ({s.status.ToAPIString()}), " +
                        $"Uploaded: {s.bytes_uploaded} of {s.total_bytes?.ToString() ?? "(unknown)"}" +
                        $"{(s.commit_progress.HasValue ? $", Commit: {s.commit_progress}" : "")}" +
                        $"{(String.IsNullOrEmpty(s.commit_error) ? "" : $", Error: {s.commit_error}")}");
                }

                // Wrap the existing parameters to add our own debug hooks.
                AsyncParameters oldParameters = parameters;
                parameters = new AsyncParameters(oldParameters, onProgressUpdate: s =>
                {
                    OnProgressUpdate(s);
                    oldParameters.OnProgressUpdate?.Invoke(s);
                });
            }
166
167#pragma warning disable 618
168 // TODO: Deprecate binary YELT format and related logic.
169 bool isBinaryYELTUpload = (dataEndpoint.Owner as YELTLossSet)?.data_type == YELTLossSet.DataType.binary;
170 // Hack: If this is a binary YELT upload, the final file uploaded will be a zip file
171 // (not just temporarily zipped for transmission). We won't know the final file size,
172 // so we must set it to null now even if it can be determined from the stream.
174 {
175 fileSize = null;
176 if (Utils.EnableDebugLogging)
177 Debug.WriteLine($"{DateTime.UtcNow.ToString(API.DebugTimeFormat)} - LFU " +
178 $"binary YELTLossSet detected, file size ignored.");
179 }
180#pragma warning restore 618
181
182 // Initialize the upload session. If we are resuming a previous upload, get the offset
183 // we should start streaming data from.
184 long currentStreamPosition = 0;
185 // TODO: What if the user's stream has already been advanced? Can this be configured?
187
            // If the file isn't already fully uploaded, begin the process of uploading data.
            if (bytesToSkip != fileSize)
            {
                // If we are resuming a previous upload, advance the stream to where uploading will resume.
                if (bytesToSkip.HasValue && bytesToSkip.Value > 0)
                {
                    currentStreamPosition = AdvanceStream(dataStream, bytesToSkip.Value);
                    if (Utils.EnableDebugLogging)
                        Debug.WriteLine($"{DateTime.UtcNow.ToString(API.DebugTimeFormat)} - " +
                            $"LFU ignored the first {bytesToSkip.Value} bytes of the stream's " +
                            $"{fileSize} total bytes in order to resume a previous upload.");
                }

                // Upload the entire file starting from the current point in the data stream.
                currentStreamPosition = UploadFromStream(dataEndpoint, dataStream, parameters,
                    currentStreamPosition, fileSize);
                if (Utils.EnableDebugLogging)
                {
                    DateTime now = DateTime.UtcNow;
                    Debug.WriteLine($"{now.ToString(API.DebugTimeFormat)} Upload Complete: " +
                        $"{(now - start).TotalMilliseconds}ms");
                }
            }
            // If bytesToSkip equals fileSize, there's nothing to upload, so go straight to the commit phase.
            else
            {
                if (fileSize.HasValue)
                    currentStreamPosition = AdvanceStream(dataStream, fileSize.Value); // (reconstructed)
                if (Utils.EnableDebugLogging)
                    Debug.WriteLine($"{DateTime.UtcNow.ToString(API.DebugTimeFormat)} - LFU ignored all " +
                        $"{fileSize?.ToString() ?? "(unknown length)"} bytes of the stream in order to " +
                        $"resume monitoring the status of a previous upload that has already been committed.");
            }

            // Notify that we have 100% completed the upload.
            parameters.OnProgressUpdate?.Invoke(new StatusResponse
            {
                status = TaskStatus.Uploading,
                bytes_uploaded = currentStreamPosition,
                total_bytes = currentStreamPosition
            });

            // Commit the file to begin server-side processing and validation.
            CommitUpload(dataEndpoint, parameters);
            if (Utils.EnableDebugLogging)
            {
                DateTime now = DateTime.UtcNow;
                Debug.WriteLine($"{now.ToString(API.DebugTimeFormat)} Commit Complete. " +
                    $"Total Large file upload time: {(now - start).TotalMilliseconds}ms");
            }
        }

        // Helper method that determines the length of the Stream from its properties,
        // or returns null if that's not possible.
        private static long? DetermineStreamLength(Stream dataStream)
        {
            long dataStreamStart = dataStream.CanSeek ? dataStream.Position : 0;
            // Get the file size from the stream (minus the current position).
            return dataStream.CanSeek ? dataStream.Length - dataStreamStart : (long?)null;
        }

        // Helper method that advances the stream by the given number of bytes,
        // seeking when supported, otherwise reading and discarding the data.
        private static long AdvanceStream(Stream dataStream, long bytes)
        {
            // Skip ahead in the supplied stream now if we need to and are able to.
            if (dataStream.CanSeek)
                return dataStream.Position += bytes;
            // If the stream doesn't support seeking, we must use Read() to advance. :(
            byte[] ignored = new byte[Math.Min(bytes, DefaultBufferSize)];
            long streamPosition = 0;
            while (streamPosition < bytes && dataStream.CanRead)
            {
                int bytesRead = dataStream.Read(ignored, 0,
                    (int)Math.Min(ignored.Length, bytes - streamPosition));
                // Some types of streams simply return no bytes when you reach EOF.
                if (bytesRead == 0)
                    break;
                streamPosition += bytesRead;
            }
            if (streamPosition < bytes)
                throw new EndOfStreamException($"AdvanceStream was asked to advance {bytes} " +
                    $"bytes, but the stream ended after reading {streamPosition} bytes.");
            return streamPosition;
        }

        // Helper method that streams data from the current offset until the end of the file.
        // Returns the final stream position after upload completion.
        // (Signature reconstructed: the method name is assumed, as the original
        // declaration lines were collapsed in this listing.)
        private static long UploadFromStream(LargeDataSubResource dataEndpoint, Stream dataStream,
            AsyncParameters parameters, long streamPosition, long? streamLength)
        {
            // The following properties are only used for purposes of debugging and timing the upload.
            DateTime start = DateTime.UtcNow;
            DateTime checkpoint = start;
            long initialStreamPosition = streamPosition;
            if (Utils.EnableDebugLogging)
            {
                Debug.WriteLine($"{start.ToString(API.DebugTimeFormat)} - LFU " +
                    $"starting from stream position of {streamPosition} bytes out of " +
                    $"{(streamLength.HasValue ? streamLength + " bytes." : "(unknown size).")}");
            }

            // Set the maximum source buffer length equal to the number of bytes
            // we want to upload in each PATCH request to the server.
            int max_buffer_length = parameters.max_chunk_size;
            // If we know the length of the entire file, we can potentially shrink the max buffer size.
            if (streamLength.HasValue && streamLength.Value < max_buffer_length)
                max_buffer_length = (int)streamLength.Value; // (reconstructed: shrink to the known size)

            // Set up the producer-consumer chain of buffered bytes to be uploaded.
            // The basic source is a buffer for the file stream.
            // (The producer construction was collapsed in the original listing; "restSource"
            // below refers to the resulting buffered producer.)
            // Add on the gzip layer if applicable.
            if (parameters.enable_compression)
            {
                // (The gzip producer wrapping was collapsed in the original listing.)
            }
#pragma warning disable 618
            // TODO: Deprecate binary YELT format and related logic.
            bool isBinaryYELTUpload = (dataEndpoint.Owner as YELTLossSet)?.data_type == YELTLossSet.DataType.binary;
            // Automatic conversion can be toggled off in the parameters to support
            // e.g. downloading and re-uploading a file that has already been converted to binary.
            if (isBinaryYELTUpload)
            {
                if (parameters.binary_yelt_options != Parameters.BinaryYELTUploadOptions.AutomaticCompressionOnly)
                {
                    // (The CSV-to-binary conversion producer wrapping was collapsed in the original listing.)
                }
            }
#pragma warning restore 618

            using (restSource)
            {
                // If this is a GZipStream, we need to keep track of *uncompressed* bytes
                // uploaded because that's what the server expects the offset to be.
                // (The declaration of "sourceAsGZipProducer" was collapsed in the original listing.)
                long currentChunkNumber = 0;

                // Tracks the overhead in setting up the producer/consumer chain.
                if (Utils.EnableDebugLogging)
                {
                    DateTime now = DateTime.UtcNow;
                    Debug.WriteLine($"{now.ToString(API.DebugTimeFormat)} - LFU " +
                        $"time to setup: {(now - checkpoint).TotalSeconds}s");
                    checkpoint = now;
                }

                // Tell the producer/consumer threads to begin buffering data.
                // (The call that starts the producer was collapsed in the original listing.)

                // Loop until we've uploaded the remaining bytes in the file.
                // If the file size is unknown, loop until an inner exit condition is met.
                while (!streamLength.HasValue || streamPosition < streamLength)
                {
                    // Check to see if the upload has been cancelled before each chunk.
                    Utils.CheckForCancellation(parameters);

                    // Invoke the OnProgressUpdate callback (if any) with a mocked
                    // status response to avoid a round trip request to the server.
                    parameters.OnProgressUpdate?.Invoke(new StatusResponse
                    {
                        status = TaskStatus.Uploading,
                        bytes_uploaded = streamPosition,
                        total_bytes = streamLength
                    });
                    // Raise an error if the end of stream is encountered too early.
                    // Need to check in this order to avoid a race condition where an
                    // item is added to the collection and marked complete after the "Any()" check.
                    if (!restSource.CanTake)
                    {
                        // If the file length is unknown, this is the expected way of breaking the loop.
                        if (!streamLength.HasValue)
                            break;
                        // Otherwise, if we haven't received the full file we were expecting, raise an error.
                        throw new EndOfStreamException($"Expected to receive {streamLength} bytes of data, " +
                            $"but the end of the data source was reached after {streamPosition} bytes.");
                    }

                    // Try to get the next buffered chunk (or wait for one to become available).
                    // (Reconstructed: the original take-from-producer lines were collapsed in
                    // this listing; "success" and "nextChunk" come from that call.)
                    bool success = restSource.TryTake(out var nextChunk);
                    // If no new data was returned, jump back to the start of the loop.
                    if (!success) continue;

                    double elapsedSeconds, megabytes = 0;
                    if (Utils.EnableDebugLogging)
                    {
                        // Tracks how long it took to produce the last chunk.
                        DateTime now = DateTime.UtcNow;
                        elapsedSeconds = (now - checkpoint).TotalSeconds;
                        megabytes = nextChunk.LengthFilled / 1048576d;
                        Debug.WriteLine($"{now.ToString(API.DebugTimeFormat)} - LFU chunk " +
                            $"{currentChunkNumber} size: {nextChunk.LengthFilled} bytes ({megabytes:0.00} MiB)");
                        Debug.WriteLine($"{now.ToString(API.DebugTimeFormat)} - LFU time to " +
                            $"produce chunk {currentChunkNumber}: {elapsedSeconds}s (" +
                            (Math.Abs(elapsedSeconds) < 1E-4 ? "was ready)" : $"{megabytes / elapsedSeconds:0.0} MiBps)"));
                        checkpoint = now;
                    }
                    // Try to upload this chunk.
                    UploadChunk(dataEndpoint, nextChunk.Bytes, nextChunk.LengthFilled, streamPosition, parameters);
                    if (Utils.EnableDebugLogging)
                    {
                        // Tracks how long it took to upload the last chunk.
                        DateTime now = DateTime.UtcNow;
                        elapsedSeconds = (now - checkpoint).TotalSeconds;
                        Debug.WriteLine($"{now.ToString(API.DebugTimeFormat)} - LFU time " +
                            $"to upload chunk {currentChunkNumber}: {elapsedSeconds}s " +
                            $"({megabytes / elapsedSeconds:0.0} MiBps)");
                        checkpoint = now;
                        currentChunkNumber++; // (reconstructed)
                    }
                    // Update the stream position to reflect what has been uploaded to the server.
                    // (The update statement was collapsed in the original listing; for gzip uploads
                    // it advances by the chunk's *uncompressed* size rather than nextChunk.LengthFilled.)
                }
                // If data is being compressed, we can output the compression ratio achieved.
                if (Utils.EnableDebugLogging && sourceAsGZipProducer != null)
                    Debug.WriteLine($"Compressed a total of {sourceAsGZipProducer.TotalBytesIn} bytes " +
                        $"into {sourceAsGZipProducer.TotalBytesOut} bytes (" +
                        $"{sourceAsGZipProducer.TotalBytesOut / (double)sourceAsGZipProducer.TotalBytesIn:P2} of the original size).");
            }

            if (Utils.EnableDebugLogging)
            {
                // Tracks how long the entire file took to upload, and the effective throughput.
                DateTime now = DateTime.UtcNow;
                double total_elapsed = (now - start).TotalSeconds;
                long total_bytes = streamPosition - initialStreamPosition;
                double total_megabytes = total_bytes / 1048576d;
                Debug.WriteLine($"{now.ToString(API.DebugTimeFormat)} - LFU " +
                    $"total upload time: {total_elapsed}s for {total_bytes} bytes " +
                    $"({total_megabytes:0.0} MiB - {total_megabytes / total_elapsed:0.0} MiBps)");
            }
            return streamPosition;
        }

        /// <summary>Commit the uploaded data and monitor server-side processing until it completes.</summary>
        private static void CommitUpload(LargeDataSubResource dataEndpoint, AsyncParameters parameters)
        {
            StatusResponse status = null;

            // Check to see if the upload has been cancelled before attempting the commit.
            Utils.CheckForCancellation(parameters);

            // HACK: Since the user is willing to wait up until "MaxPollTotalTime" for the result
            // to be ready, in theory any request leading up to polling can take this long
            // before timing out and still be acceptable. This is a hack because we aren't
            // keeping track of the total time taken from beginning to end of the "commit"
            // process - we should really only allow polling for MaxPollTotalTime minus
            // the total time taken by all requests before we started actually polling.
            int requestTimeout = parameters.commit_polling_options.MaxPollTotalTime; // (reconstructed)

            // Finalize the upload session.
            // In most cases, it's faster to invoke commit immediately and expect a successful
            // response. In the few scenarios where it is not appropriate to invoke commit
            // (we are resuming a previously committed upload, or there was an error and
            // the chunks are incomplete), we can follow this up with a status GET to clarify.
            try
            {
                // (Reconstructed: the commit POST request line was collapsed in the original
                // listing; the "commit" sub-resource name is an assumption.)
                IRestResponse response = dataEndpoint.commit.Post(timeout: requestTimeout);
                if (response.StatusCode != HttpStatusCode.Accepted)
                    throw new APIRequestException("Unexpected Server response to commit.", null, response);
            }
            catch (APIRequestException e)
            {
                if (e.RestResponse.StatusCode != HttpStatusCode.MethodNotAllowed)
                    throw;
                // If the POST is 405 Method Not Allowed, we must be resuming another commit or
                // in an incomplete state. Get the status from the server.
                status = dataEndpoint.status.Get(timeout: requestTimeout);
                // If we're not in one of the below states, re-throw the error that was encountered.
                if (!status.status.IsBusy() && !status.status.IsProcessingComplete())
                    throw;
                // Otherwise continue with the logic for monitoring the processing.
            }

            // Poll the status endpoint with GET until processing is complete.
            if (status == null || status.status.IsBusy())
                status = Utils.PollUntilDataProcessed(dataEndpoint, parameters);
            // If the processing is complete, report either success or error.
            if (status.status == TaskStatus.Success)
                parameters.OnSuccess?.Invoke(status);
            else if (status.status.IsError())
                parameters.OnError?.Invoke(new CommitFailedException(status.commit_error));
            else
                parameters.OnError?.Invoke(new CommitFailedException(
                    $"The data endpoint entered an unexpected state \"{status.status}\" " +
                    $"while attempting to commit the upload."));
        }

        // A reusable request header marking the request body as gzip-encoded.
        // (Declaration reconstructed: the original line was collapsed in this listing.)
        private static readonly List<Parameter> GzipEncodingHeader = new List<Parameter>(new[]
        {
            new Parameter
            {
                Type = ParameterType.HttpHeader,
                Name = "Content-Encoding",
                Value = "gzip"
            }
        });

        /// <summary>Upload a single chunk of data, retrying on timeout up to the configured limit.</summary>
        private static void UploadChunk(LargeDataSubResource dataEndpoint, byte[] data, int count,
            long offset, AsyncParameters parameters)
        {
            int uploadAttempts = 0;
            while (true)
            {
                uploadAttempts++; // (reconstructed)
                try
                {
                    dataEndpoint.Patch(data, count, offset, parameters.chunk_timeout,
                        parameters.enable_compression ? GzipEncodingHeader : null);
                    return;
                }
                // If the error was a timeout, try again.
                catch (TimeoutException)
                {
                    if (uploadAttempts >= parameters.max_retries_per_chunk)
                        throw;
                }
                // If the error is an APIRequestException:
                catch (APIRequestException e)
                {
                    if (e.RestResponse.StatusCode != HttpStatusCode.MethodNotAllowed)
                        throw;

                    // Get the status from the server.
                    // (Timeout value reconstructed; the original line was collapsed in this listing.)
                    int requestTimeout = parameters.commit_polling_options.MaxPollTotalTime;
                    var status = dataEndpoint.status.Get(timeout: requestTimeout);

                    // If we're not in one of the below states, re-throw the error that was encountered.
                    if (!status.status.IsBusy() && !status.status.IsProcessingComplete())
                        throw;

                    // A 405 with a busy/complete status means the upload has already moved on,
                    // so there is nothing left to PATCH.
                    return;
                }
                Utils.CheckForCancellation(parameters);
            }
        }

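        // Overview of the conflict-resolution behaviour implemented below, per strategy
        // and server-reported state (summarized from the method body):
        //   Awaiting_Upload         -> start a new session at offset 0 (any strategy).
        //   Success                 -> throw unless the strategy is AttemptResume.
        //   Unknown                 -> always throw.
        //   Otherwise (busy/failed) -> RaiseError: throw UploadInProgressException;
        //                              CancelPriorUpload: delete the endpoint and restart
        //                                (only allowed from Uploading or Failed);
        //                              AttemptResume: continue from the server's
        //                                bytes_uploaded offset (or EOF while processing).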
        /// <summary>Determine the offset this upload should begin from, based on the server's
        /// reported status and the configured conflict strategy.</summary>
        // (Parameter list reconstructed; the original continuation line was collapsed in this listing.)
        private static long? InitiateUploadSession(
            LargeDataSubResource dataEndpoint, long? fileSize, AsyncParameters parameters)
        {
            // Request the current status of the data endpoint.
            StatusResponse status = dataEndpoint.status.Get();
            TaskStatus state = status.status;

            // How we proceed usually depends on our conflict resolution strategy.
            HandleUploadInSessionStrategy strategy = parameters.handle_existing_upload_strategy;

            // Check for the simplest statuses which don't require much additional work.
            switch (state)
            {
                case TaskStatus.Awaiting_Upload:
                    // Initiate the large upload and that's it.
                    // (The request that initiates the upload session was collapsed in the original listing.)
                    return 0;
                case TaskStatus.Success:
                    // Previous upload finalized and successful; raise an error if we aren't resuming.
                    if (strategy != HandleUploadInSessionStrategy.AttemptResume)
                        throw new InvalidOperationException("Cannot upload data for this resource " +
                            "because existing data has already been successfully uploaded.");
                    break;
                case TaskStatus.Unknown:
                    // Hopefully this never happens.
                    throw new APIRequestException("Could not determine the current state " +
                        "of this data upload process. Cannot continue.");
            }

            if (strategy == HandleUploadInSessionStrategy.RaiseError)
                throw new UploadInProgressException();
            if (strategy == HandleUploadInSessionStrategy.CancelPriorUpload)
            {
                if (state != TaskStatus.Uploading && state != TaskStatus.Failed)
                    throw new UploadInProgressException("A conflicting data upload is currently " +
                        "being processed and cannot be deleted. Please wait until processing " +
                        "of the file is complete.");
                // Delete the current data endpoint, then initiate the new large upload.
                // (The request that initiates the upload session was collapsed in the original listing.)
                dataEndpoint.Delete();
                return 0;
            }
            // (This should never happen unless the enum is changed without revising this method.)
            if (strategy != HandleUploadInSessionStrategy.AttemptResume)
                throw new ArgumentException($"Unexpected upload conflict strategy: {strategy}");

            // Raise an error if we aren't in one of the expected possible states
            // (shouldn't happen if the above conditional logic is sound).
            if (!(state == TaskStatus.Uploading || state == TaskStatus.Queued
                || state == TaskStatus.Processing || state == TaskStatus.Success
                || state == TaskStatus.Failed))
            {
                throw new Exception($"Internal error. Unexpected status: {state}");
            }
            // At this point the status is one of the following:
            // Uploading, Queued, Processing, Failed, or Success.
            // In all cases, advance the stream to match the server-reported offset (in the
            // 'processed' cases this should be EOF) and let the upload process commit or re-commit.

            // If supplied, ensure the total_bytes matches our file's length.
            if (fileSize != null && status.total_bytes != null && status.total_bytes.Value != fileSize)
            {
                // (Exception type reconstructed; the original throw line was collapsed in this listing.)
                throw new UploadInProgressException(
                    $"Attempted to resume an existing upload session with a file of different size. " +
                    $"Prior file size: {status.total_bytes.Value}. This file size: {fileSize}.");
            }

            // Special case - if the state is "Processing", the /status endpoint does not
            // report anything that tells us the size of the file, so we have to assume
            // it matches our file size and inform the caller to advance to the end of the file.
            // TODO: Verify whether this is still the case. File a complaint if so ;)
            if (state == TaskStatus.Processing || state == TaskStatus.Queued)
            {
                return fileSize;
            }

            // Ensure the bytes_uploaded lines up with this file.
            if (status.bytes_uploaded == null)
                throw new APIRequestException(
                    "Server status response was missing the expected bytes_uploaded value.");
            if (status.bytes_uploaded.Value > fileSize)
            {
                throw new UploadInProgressException($"Server gave unexpected Offset value of: " +
                    $"{status.bytes_uploaded}, which exceeds this file's size of: {fileSize}");
            }

            // Resume the file upload at the position specified by the offset header.
            return status.bytes_uploaded.Value;
        }
        #endregion Segmented Upload Implementation
    }
}
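
Usage sketch (not part of the file above): the typical call pattern for these extension methods, assuming a YELTLossSet that has already been saved to the server and that exposes its large-data endpoint as a "data" sub-resource. The endpoint property name, the AsyncParameters object-initializer syntax, the async method name, and the GetSavedLossSet helper are assumptions for illustration, not confirmed API.

    // Hypothetical setup: a loss set that already has an id on the server.
    YELTLossSet lossSet = GetSavedLossSet(); // assumed helper, for illustration only

    // Synchronous: blocks until the data is uploaded, committed, and processed;
    // any failure surfaces as a thrown exception.
    lossSet.data.LargeFileUpload(@"C:\losses\yelt.csv");

    // Asynchronous: returns immediately; progress and completion arrive via callbacks
    // invoked from a background thread (see the Asynchronous Invocations region above).
    AsyncParameters callbacks = new AsyncParameters
    {
        OnProgressUpdate = s => Console.WriteLine($"Uploaded {s.bytes_uploaded} of {s.total_bytes}"),
        OnSuccess = s => Console.WriteLine("Upload committed and processed."),
        OnError = e => Console.Error.WriteLine($"Upload failed: {e.Message}")
    };
    lossSet.data.LargeFileUploadAsync(@"C:\losses\yelt.csv", callbacks);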
Referenced documentation:

A custom exception class that includes the RestSharp.IRestResponse that generated the exception.
Describes an endpoint off of some types of resources from which an associated "large data file" can be uploaded.
Describes a collection of resources which can be listed.
Parameters to be used in a Large Upload operation.
Action<StatusResponse> OnProgressUpdate
Action to invoke every time a chunk is uploaded. The callback is supplied with a double indicating progress.
Action<StatusResponse> OnSuccess
Action to invoke upon successful completion of the large upload.
Action<Exception> OnError
Action to invoke upon failure of the large upload. If this is supplied, it will be called when an error occurs.
Converts YELT CSV data into a specified format.
Produces a BlockingCollection of buffered bytes asynchronously which can be consumed on some other thread.
Exception raised when the commit process fails.
Parameters to be used in a Large Data Upload operation.
Definition Parameters.cs:7
PollingOptions commit_polling_options
Determines how the system polls the status of the data file while waiting for the data to be processed.
Definition Parameters.cs:47
int chunk_timeout
The timeout (in milliseconds) for a single chunk upload to complete. Default: 60,000 ms (1 minute).
Definition Parameters.cs:41
HandleUploadInSessionStrategy handle_existing_upload_strategy
The HandleUploadInSessionStrategy to employ if an existing upload session is already in progress.
Definition Parameters.cs:51
BinaryYELTUploadOptions
Options for controlling whether to automatically convert a binary YELTLossSet's data to the binary format.
Definition Parameters.cs:67
bool enable_compression
Whether to compress (gzip) the data during upload. (Default: False) Set to true if the files you're uploading are not already compressed.
Definition Parameters.cs:59
int max_chunk_size
The maximum size of a single uploaded chunk in bytes. Default: 2^24 bytes (16 Megabytes).
Definition Parameters.cs:33
int min_chunk_size
The minimum size of a single uploaded chunk in bytes. Set this larger if there is enough latency between requests.
Definition Parameters.cs:27
BinaryYELTUploadOptions binary_yelt_options
Options for controlling whether to automatically convert a binary YELTLossSet's data to the binary format.
Definition Parameters.cs:82
int max_retries_per_chunk
The maximum number of retries for a failed chunk upload before an error is thrown.
Definition Parameters.cs:37
Indicates the status of a data endpoint.
TaskStatus status
The state of the data upload, as an enumeration.
string commit_error
If the processing of the file failed, the error message reported by the server.
long? bytes_uploaded
The number of bytes that have been uploaded by the client so far.
long? total_bytes
Total number of bytes in the file to be uploaded.
Indicates that a large upload could not take place because an existing large upload session is already in progress.
Large Data Upload Utilities.
Definition Utilities.cs:11
Representation of a loss set with an associated year-event-loss table.
DataType
The format of the data uploaded against this YELT.
CancellationToken? CancellationToken
A cancellation token (if available) supplied to the polling method to allow the polling task to be cancelled.
int MaxPollTotalTime
The maximum time (in milliseconds) to poll the request before raising a NotWaitingException.
Helper class which makes it easier to build a set of request parameters.
HandleUploadInSessionStrategy
Behaviour to use if an existing upload is already in progress for an endpoint.
The structure containing an array of bytes and an integer indicating the number of bytes in the array that are filled.
TaskStatus
The status of a data upload which may be in progress.
Definition TaskStatus.cs:9
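
The Parameters members documented above can be combined to tune an upload. A minimal sketch, assuming the fields are publicly settable via an object initializer (as their lowercase field names suggest) and reusing the assumed "lossSet.data" accessor from the earlier example:

    Parameters tuned = new Parameters
    {
        enable_compression = true,                // gzip chunks in transit (off by default)
        max_chunk_size = 8 * 1024 * 1024,         // cap each PATCH at 8 MiB (default 16 MiB)
        chunk_timeout = 120000,                   // allow 2 minutes per chunk (default 1 minute)
        max_retries_per_chunk = 5,                // retry a timed-out chunk up to 5 times
        handle_existing_upload_strategy =
            HandleUploadInSessionStrategy.AttemptResume // resume rather than fail on conflict
    };
    lossSet.data.LargeFileUpload(@"C:\losses\yelt.csv", tuned);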