import webtranspose as webt

crawl = webt.Crawl(
    "https://www.example.com",
    max_pages=100,
    render_js=True,
    api_key="YOUR_WEBTRANSPOSE_API_KEY",  # optional, if you want to run on cloud
)
await crawl.crawl()  # crawl.queue_crawl() for async
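Note that crawl.crawl() is a coroutine, so the await above assumes you are already inside an async context (for example, a notebook). In a plain Python script you can drive it with the standard asyncio module; a minimal sketch, reusing the same constructor arguments as above:

import asyncio

import webtranspose as webt


async def main():
    crawl = webt.Crawl(
        "https://www.example.com",
        max_pages=100,
        render_js=True,
    )
    await crawl.crawl()  # runs the crawl to completion

asyncio.run(main())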
Download the Crawled Data (1 line of code)
# download to local disk
crawl.download()
Access the Crawled Data (1 line of code)
# get page data
page_urls = crawl.get_visited()
for url in page_urls:
    print(crawl.get_page(url))
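If you want to persist the page data rather than print it, one option is to collect every visited page into a single JSON file. A minimal sketch, assuming get_page() returns JSON-serializable data (adjust the serialization if it returns a custom object):

import json

# map each visited URL to its crawled page data
pages = {url: crawl.get_page(url) for url in crawl.get_visited()}
with open("pages.json", "w") as f:
    json.dump(pages, f, indent=2)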
import webtranspose as webt

schema = {
    "Merchant Name": "string",
    "Title of Product": "string",
    "Product Photo URL": "string",
}

scraper = webt.Scraper(
    schema,
    render_js=True,
    api_key="YOUR_WEBTRANSPOSE_API_KEY",  # optional, if you want to run on cloud
)
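The snippet above only constructs the scraper. To extract data you pass it a page URL; a hedged sketch, assuming Scraper exposes a scrape(url) method that returns a dict keyed by the schema fields (the product URL below is a placeholder for illustration):

# hypothetical product page URL for illustration
out = scraper.scrape("https://www.example.com/products/123")
print(out["Merchant Name"])
print(out["Product Photo URL"])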