当前位置:网站首页>Pyppeteer crawler

Pyppeteer crawler

2022-04-23 18:00:00 Round programmer

import asyncio
import pyppeteer
from user_agents import UA
from collections import namedtuple
# Lightweight record returned by get_html(); the typename "rs" is kept so
# existing reprs/pickles are unchanged.
Response = namedtuple(
    "rs",
    ["title", "url", "html", "cookies", "headers", "history", "status"],
)
async def get_html(url, timeout=30, wait_selector='.share-box'):
    """Fetch *url* in headless Chromium and return a Response namedtuple.

    Parameters
    ----------
    url : str
        Page to navigate to.
    timeout : int | float, optional
        Timeout in seconds for both navigation and the selector wait
        (pyppeteer expects milliseconds, converted below).
    wait_selector : str, optional
        CSS selector to wait for before reading the page content.

    Returns
    -------
    Response
        (title, url, html, cookies, headers, history, status); ``history``
        is always None — pyppeteer does not expose redirect history here.

    Raises
    ------
    pyppeteer.errors.TimeoutError
        If navigation or the selector wait exceeds *timeout*.
    """
    timeout_ms = int(timeout * 1000)
    browser = await pyppeteer.launch(headless=True, args=['--no-sandbox'])
    try:
        page = await browser.newPage()
        await page.setUserAgent(UA)
        res = await page.goto(url, options={'timeout': timeout_ms})
        # Wait for the element instead of busy-polling querySelector in a
        # tight loop, which spins the event loop and never times out.
        await page.waitForSelector(wait_selector, options={'timeout': timeout_ms})
        # Scroll one viewport down so lazily-loaded content can render.
        await page.evaluate('window.scrollBy(0, window.innerHeight)')
        html = await page.content()
        title = await page.title()
        cookies = await page.cookies()
        return Response(
            title=title,
            url=url,
            html=html,
            cookies=cookies,
            headers=res.headers,
            history=None,
            status=res.status,
        )
    finally:
        # Always release the Chromium process, even if navigation failed —
        # the original leaked a browser per call on any exception.
        await browser.close()

if __name__ == '__main__':
    url_list = [
                "http://gxt.hunan.gov.cn//gxt/xxgk_71033/czxx/201005/t20100528_2069234.html",
                "http://gxt.hunan.gov.cn//gxt/xxgk_71033/czxx/201005/t20100528_2069221.html",
                "http://gxt.hunan.gov.cn//gxt/xxgk_71033/czxx/200811/t20081111_2069210.html"
            ]

    async def _main():
        # Fetch all URLs concurrently on a single event loop.
        return await asyncio.gather(*(get_html(u) for u in url_list))

    # asyncio.run() (3.7+) replaces the deprecated
    # get_event_loop()/run_until_complete() pattern and closes the loop.
    results = asyncio.run(_main())
    for resp in results:
        print(resp.title)

版权声明
本文为[Round programmer]所创,转载请带上原文链接,感谢
https://yzsam.com/2022/04/202204230545315893.html