This repository was archived by the owner on Mar 18, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathzipcheck.py
More file actions
50 lines (43 loc) · 1.54 KB
/
zipcheck.py
File metadata and controls
50 lines (43 loc) · 1.54 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
import random
import urllib.request
import webbrowser
import threading
# Number of domains each worker thread probes; the full list is split
# into consecutive slices of this size, one thread per slice.
BATCH_SIZE = 500
def check_urls_batch(urls):
    """Sequentially probe every domain in *urls* (one worker thread's slice)."""
    for candidate in urls:
        check_url(candidate)
def check_url(url, count=0):
    """Probe https://<url> and append it to workinglinks.txt if it responds.

    On an HTTP error the probe is retried up to 3 total attempts (count
    tracks the current attempt). A 302 redirect error is treated as a
    working link. Any other failure (DNS error, timeout, connection
    refused, ...) is silently skipped — this is a best-effort scan.

    Parameters:
        url: bare domain name, without scheme.
        count: zero-based retry counter used by the recursive retries.
    """
    try:
        # A successful open means the domain serves something over HTTPS.
        urllib.request.urlopen("https://" + url, timeout=5)
        print("Found a working link: " + url)
        with open("workinglinks.txt", "a") as file:
            file.write(url + "\n")
    except urllib.error.HTTPError as e:
        print(f"{url} failed with the following error on attempt {count + 1}:")
        print(e)
        if count >= 2:  # give up after the third attempt
            return
        # Compare the status code directly instead of parsing str(e),
        # which is fragile against message-format changes.
        if e.code == 302:
            with open("workinglinks.txt", "a") as file:
                file.write(url + "\n")
        else:
            check_url(url, count=count + 1)
    except Exception:
        # Deliberate best-effort: dead domains are expected and ignored.
        # (Bare `except:` would also swallow KeyboardInterrupt/SystemExit.)
        pass
def main():
    """Download the .zip domain list, scan it with worker threads, then append joke entries."""
    response = urllib.request.urlopen("https://raw.githubusercontent.com/trickest/zip/main/zip-domains.txt")
    domains = [raw.strip().decode('utf-8') for raw in response]
    # Carve the domain list into fixed-size slices and hand each slice
    # to its own worker thread.
    workers = []
    for start in range(0, len(domains), BATCH_SIZE):
        batch = domains[start:start + BATCH_SIZE]
        worker = threading.Thread(target=check_urls_batch, args=(batch,))
        workers.append(worker)
        worker.start()
    # Wait for every worker to drain its slice before the final write.
    for worker in workers:
        worker.join()
    with open("workinglinks.txt", "a") as file:
        file.write("yourmom.zip\nyourdad.zip\nyoursister.zip\ndb0.zip\nnudes.zip\napt.nudes.zip")
# Run the scan only when executed as a script, not when imported.
if __name__ == "__main__":
    main()