/// Fetches HTML content from multiple URLs concurrently.
///
/// [urls] is the list of URLs to fetch.
/// [headers] are additional headers to send with each request.
/// [timeout] is the timeout for each request in milliseconds.
/// [retries] is the number of retry attempts.
/// [ignoreRobotsTxt] is whether to ignore robots.txt rules (default: false).
/// [onProgress] is invoked after each URL finishes (successfully or not)
/// with the number of completed URLs, the total, and the URL just finished.
///
/// Returns a map from URL to fetched HTML. URLs whose fetch failed are
/// omitted from the result; failures are logged and counted, and the batch
/// still completes once every task has finished.
Future<Map<String, String>> fetchHtmlBatch({
  required List<String> urls,
  Map<String, String>? headers,
  int? timeout,
  int? retries,
  bool ignoreRobotsTxt = false,
  void Function(int completed, int total, String url)? onProgress,
}) async {
  _logger.info('Fetching HTML batch: ${urls.length} URLs');
  final results = <String, String>{};
  final errors = <String, dynamic>{};

  // Bug fix: with an empty list the loop below never runs, so
  // checkCompletion() was never invoked and the returned future hung
  // forever. Complete immediately instead.
  if (urls.isEmpty) {
    _logger.success('Batch completed successfully');
    return results;
  }

  // Completer adapts the task queue's callback API to a Future; this is
  // the callback-to-Future case where a Completer is appropriate.
  final completer = Completer<Map<String, String>>();
  var completed = 0;

  // Completes the returned future once all tasks (success or failure)
  // have reported in.
  void checkCompletion() {
    if (completed == urls.length) {
      if (errors.isNotEmpty) {
        _logger.warning(
          'Batch completed with ${errors.length} errors: ${errors.keys.join(', ')}',
        );
      } else {
        _logger.success('Batch completed successfully');
      }
      completer.complete(results);
    }
  }

  // Enqueue one fetch task per URL; the queue runs them concurrently.
  for (final url in urls) {
    _taskQueue.addTask<String>(
      task:
          () => _webScraper.fetchHtml(
            url: url,
            headers: headers,
            timeout: timeout,
            retries: retries,
            ignoreRobotsTxt: ignoreRobotsTxt,
          ),
      priority: 0,
      taskName: 'FetchHTML-$url',
      onStart: () {
        _logger.info('Starting fetch for URL: $url');
      },
      onComplete: (result) {
        _logger.success('Fetch completed for URL: $url');
        results[url] = result;
        completed++;
        onProgress?.call(completed, urls.length, url);
        checkCompletion();
      },
      onError: (error, stackTrace) {
        _logger.error('Fetch failed for URL: $url - $error');
        errors[url] = error;
        completed++;
        onProgress?.call(completed, urls.length, url);
        checkCompletion();
      },
    );
  }

  return completer.future;
}