import random
import time
  3. def retry_html_page(url, max_retries=3, delay=1):
  4. """
  5. Retries fetching an HTML page with exponential backoff.
  6. """
  7. for attempt in range(max_retries):
  8. try:
  9. # Simulate fetching the HTML page (replace with your actual fetching logic)
  10. # For demonstration, just simulate a potential error
  11. if attempt < max_retries:
  12. if random.random() < 0.2: # Simulate 20% chance of failure
  13. raise Exception(f"Simulated error on attempt {attempt+1}")
  14. # Replace this with your actual code to fetch the HTML page.
  15. # For example:
  16. # import urllib.request
  17. # response = urllib.request.urlopen(url)
  18. # html_content = response.read().decode('utf-8')
  19. print(f"Successfully fetched {url} on attempt {attempt+1}")
  20. return True # Success!
  21. except Exception as e:
  22. print(f"Attempt {attempt+1} failed: {e}")
  23. if attempt < max_retries - 1:
  24. # Exponential backoff
  25. sleep_time = delay * (2 ** attempt) + random.uniform(0, 1) #add random jitter
  26. print(f"Retrying in {sleep_time:.2f} seconds...")
  27. time.sleep(sleep_time)
  28. else:
  29. print(f"Max retries reached for {url}. Giving up.")
  30. return False # Failed after all retries
  31. return False #Should not reach here unless max_retries is 0
  32. if __name__ == '__main__':
  33. test_url = "https://www.example.com" # Replace with your target URL
  34. success = retry_html_page(test_url)
  35. if success:
  36. print(f"Successfully processed {test_url}")
  37. else:
  38. print(f"Failed to process {test_url} after multiple retries.")

Add your comment