import time
import random
def retry_html_page(url, max_retries=3, delay=1):
    """
    Retry fetching an HTML page with exponential backoff and jitter.

    The fetch itself is currently *simulated* (20% random failure per
    attempt); replace the marked section with real fetching logic.

    Parameters
    ----------
    url : str
        Target URL to fetch.
    max_retries : int, optional
        Maximum number of attempts before giving up (default 3).
        With max_retries == 0 the function returns False immediately.
    delay : float, optional
        Base delay in seconds; the wait before retry attempt k is
        delay * 2**k plus random jitter in [0, 1) (default 1).

    Returns
    -------
    bool
        True if any attempt succeeded, False if all attempts failed.
    """
    for attempt in range(max_retries):
        try:
            # Simulated fetch — replace with real logic, e.g.:
            #   import urllib.request
            #   html = urllib.request.urlopen(url).read().decode("utf-8")
            # Simulate a 20% chance of transient failure.
            # (NOTE: the original guarded this with `if attempt < max_retries:`,
            # which is always true inside this loop — the dead guard is removed.)
            if random.random() < 0.2:
                raise Exception(f"Simulated error on attempt {attempt+1}")
            print(f"Successfully fetched {url} on attempt {attempt+1}")
            return True  # Success!
        except Exception as e:
            print(f"Attempt {attempt+1} failed: {e}")
            if attempt < max_retries - 1:
                # Exponential backoff plus random jitter so concurrent
                # clients don't retry in lockstep.
                sleep_time = delay * (2 ** attempt) + random.uniform(0, 1)
                print(f"Retrying in {sleep_time:.2f} seconds...")
                time.sleep(sleep_time)
            else:
                print(f"Max retries reached for {url}. Giving up.")
                return False  # Failed after all retries
    return False  # Only reached when max_retries <= 0
if __name__ == '__main__':
    # Quick manual check of the retry helper against a sample URL.
    target = "https://www.example.com"  # Replace with your target URL
    if retry_html_page(target):
        print(f"Successfully processed {target}")
    else:
        print(f"Failed to process {target} after multiple retries.")
# Add your comment