import os
import requests
from bs4 import BeautifulSoup
from urllib.parse import urlparse
# Where downloaded images are stored.
OUTPUT_DIR = "images"
# The gallery search returns 90 thumbnails per page; `start` advances by this step.
PAGE_STEP = 90
# Last `start` offset to request (inclusive).
LAST_START = 2700
HEADERS = {"User-Agent": "Mozilla/5.0"}

# Gallery search URL template; only the `start` offset varies between pages.
BASE_URL = (
    "https://www.dakkadakka.com/core/gallery-search.jsp"
    "?p=1&u=3309&dq=&ll=4&auction=0&unapproved=0"
    "&coolnesslow=0&coolnesshigh=10&paintjoblow=0&paintjobhigh=10"
    "&sort1=7&sort2=0&skip=90&en=&st=&utype=&start={start}"
)


def full_image_url(thumb_url):
    """Return the full-size image URL for a thumbnail URL.

    Thumbnail filenames contain a `_mb-` marker; the full-size image
    uses the same path with plain `-` instead.
    """
    return thumb_url.replace("_mb-", "-")


def scrape_page(start):
    """Download every gallery image found on one search-result page.

    Args:
        start: Pagination offset passed as the `start` query parameter.

    Returns:
        The number of thumbnails found on the page.

    Raises:
        requests.HTTPError: If the search-page request fails.
    """
    response = requests.get(BASE_URL.format(start=start), headers=HEADERS, timeout=30)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, "lxml")
    images = soup.select("img[src*='images.dakkadakka.com/gallery']")
    print(f"Found {len(images)} thumbnails")
    for img in images:
        full_url = full_image_url(img["src"])
        filename = os.path.basename(urlparse(full_url).path)
        filepath = os.path.join(OUTPUT_DIR, filename)
        if os.path.exists(filepath):
            # Already fetched on a previous run; skip re-downloading.
            continue
        r = requests.get(full_url, headers=HEADERS, timeout=30)
        if r.status_code == 200:
            with open(filepath, "wb") as f:
                f.write(r.content)
            print(f"Downloaded: {filename}")
        else:
            print(f"Failed: {full_url}")
    return len(images)


def main():
    """Walk the gallery pages from offset 0 to LAST_START, downloading images."""
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    start = 0
    while start <= LAST_START:
        scrape_page(start)
        start += PAGE_STEP


if __name__ == "__main__":
    main()