Crawler Technology Supports More Than Just Data Collection

laical1   Published on 2021-06-21   Views: 1610

Tag: crawler

Scraping data is only one part of what crawler technology covers. Companies can use crawlers to drive traffic and attract more users, and individuals can use them to earn extra income.
Crawler techniques also support simulated login, account warming, packet-capture analysis, and more. Self-media teams that maintain accounts on many platforms routinely rely on them: rather than publishing each article to every platform by hand, a crawler can log in and post as a real user would, speeding up the workflow. The biggest problem with automated posting is getting the IP banned, which is why crawlers are usually paired with proxy IPs; a minimal sketch follows below.
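To make the proxy idea concrete, here is a minimal sketch (not from the original post) of sending requests through an authenticated HTTP proxy with the requests library. The host, port, and credentials are the same placeholders used in the Selenium script below, and httpbin.org is only a convenient echo endpoint; substitute real values for actual use:

    import requests

    # Placeholder proxy credentials; substitute your provider's real values
    PROXY_HOST = 't.16yun.cn'
    PROXY_PORT = 31111
    PROXY_USER = 'USERNAME'
    PROXY_PASS = 'PASSWORD'

    proxy_url = 'http://%s:%s@%s:%d' % (PROXY_USER, PROXY_PASS, PROXY_HOST, PROXY_PORT)
    proxies = {'http': proxy_url, 'https': proxy_url}

    # The target site sees the proxy's IP instead of ours, so rotating
    # proxies avoids per-IP bans when posting or crawling at volume
    resp = requests.get('https://httpbin.org/ip', proxies=proxies, timeout=10)
    print(resp.text)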
The Selenium framework can simulate a real user's login, and many sites do not restrict it very strictly. Open a browser with Selenium; if there is no CAPTCHA, the script can type the username and password, click the login button, and then collect and save data once logged in. The script below does exactly that, routing Chrome through the authenticated proxy via a small extension generated on the fly:
    import os
    import random
    import time
    import zipfile

    from selenium import webdriver
    from selenium.common.exceptions import TimeoutException
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support import expected_conditions as EC
    from selenium.webdriver.support.ui import WebDriverWait


    class GenCookies(object):
        # Pool of user agents, one per line; a random one is picked per session
        USER_AGENTS = [line.strip() for line in open('useragents.txt')]

        # Proxy server settings (vendor site: www.16yun.cn)
        PROXY_HOST = 't.16yun.cn'  # proxy host
        PROXY_PORT = 31111  # port
        PROXY_USER = 'USERNAME'  # username
        PROXY_PASS = 'PASSWORD'  # password

        @classmethod
        def get_chromedriver(cls, use_proxy=False, user_agent=None):
            manifest_json = """
            {
                "version": "1.0.0",
                "manifest_version": 2,
                "name": "Chrome Proxy",
                "permissions": [
                    "proxy",
                    "tabs",
                    "unlimitedStorage",
                    "storage",
                    "<all_urls>",
                    "webRequest",
                    "webRequestBlocking"
                ],
                "background": {
                    "scripts": ["background.js"]
                },
                "minimum_chrome_version":"22.0.0"
            }
            """

            background_js = """
            var config = {
                    mode: "fixed_servers",
                    rules: {
                    singleProxy: {
                        scheme: "http",
                        host: "%s",
                        port: parseInt(%s)
                    },
                    bypassList: ["localhost"]
                    }
                };

            chrome.proxy.settings.set({value: config, scope: "regular"}, function() {});

            function callbackFn(details) {
                return {
                    authCredentials: {
                        username: "%s",
                        password: "%s"
                    }
                };
            }

            chrome.webRequest.onAuthRequired.addListener(
                        callbackFn,
                        {urls: ["<all_urls>"]},
                        ['blocking']
            );
            """ % (cls.PROXY_HOST, cls.PROXY_PORT, cls.PROXY_USER, cls.PROXY_PASS)
            path = os.path.dirname(os.path.abspath(__file__))
            chrome_options = webdriver.ChromeOptions()

            # Optionally strip flags that reveal webdriver automation
            # chrome_options.add_experimental_option('excludeSwitches', ['enable-automation'])


            if use_proxy:
                # Package the manifest and background script into a zip and
                # load it into Chrome as an extension
                pluginfile = 'proxy_auth_plugin.zip'

                with zipfile.ZipFile(pluginfile, 'w') as zp:
                    zp.writestr("manifest.json", manifest_json)
                    zp.writestr("background.js", background_js)
                chrome_options.add_extension(pluginfile)
            if user_agent:
                chrome_options.add_argument('--user-agent=%s' % user_agent)
            driver = webdriver.Chrome(
                os.path.join(path, 'chromedriver'),
                options=chrome_options)

            # Optionally override the navigator.webdriver getter (anti-detection)
            # script = '''
            # Object.defineProperty(navigator, 'webdriver', {
            # get: () => undefined
            # })
            # '''
            # driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {"source": script})

            return driver

        def __init__(self, username, password):
            # Login URL of the example site
            self.url = 'https://passport.example.cn/signin/login?entry=example&r=https://m.example.cn/'
            self.browser = self.get_chromedriver(
                use_proxy=True, user_agent=random.choice(self.USER_AGENTS))
            self.wait = WebDriverWait(self.browser, 20)
            self.username = username
            self.password = password

        def open(self):
            """
            Open the login page, fill in the username and password, and click submit.
            :return: None
            """
            self.browser.delete_all_cookies()
            self.browser.get(self.url)
            username = self.wait.until(EC.presence_of_element_located((By.ID, 'loginName')))
            password = self.wait.until(EC.presence_of_element_located((By.ID, 'loginPassword')))
            submit = self.wait.until(EC.element_to_be_clickable((By.ID, 'loginAction')))
            username.send_keys(self.username)
            password.send_keys(self.password)
            time.sleep(1)
            submit.click()

        def password_error(self):
            """
            Check whether the page shows a wrong-password error message.
            :return: True if the error text appears, False on timeout
            """
            try:
                # '用户名或密码错误' is the site's "wrong username or password" text
                return WebDriverWait(self.browser, 5).until(
                    EC.text_to_be_present_in_element((By.ID, 'errorMsg'), '用户名或密码错误'))
            except TimeoutException:
                return False

        def get_cookies(self):
            """
            Return the cookies of the current browser session.
            :return: list of cookie dicts
            """
            return self.browser.get_cookies()

        def main(self):
            """
            Entry point: log in, then return an error status or the session cookies.
            :return: dict with 'status' and 'content'
            """
            self.open()
            if self.password_error():
                return {
                    'status': 2,
                    'content': '用户名或密码错误'  # wrong username or password
                }

            cookies = self.get_cookies()
            return {
                'status': 1,
                'content': cookies
            }


    if __name__ == '__main__':
        # Placeholder credentials; replace with a real account
        result = GenCookies(
            username='180000000',
            password='16yun',
        ).main()
        print(result)
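The cookies returned by main() can be saved and replayed outside the browser. As a sketch under stated assumptions (the posting endpoint is a hypothetical placeholder, and only the cookie fields that requests understands are copied over), Selenium's cookie dicts can be loaded into a requests.Session so that later posting or scraping runs without relaunching Chrome:

    import json

    import requests

    def save_cookies(cookies, path='cookies.json'):
        """Persist the cookie dicts returned by GenCookies.main()."""
        with open(path, 'w') as f:
            json.dump(cookies, f)

    def session_from_cookies(path='cookies.json'):
        """Rebuild a logged-in requests.Session from saved Selenium cookies."""
        session = requests.Session()
        with open(path) as f:
            for c in json.load(f):
                # Selenium cookies carry extra keys (httpOnly, expiry, ...)
                # that requests rejects, so copy only name/value/domain
                session.cookies.set(c['name'], c['value'], domain=c.get('domain'))
        return session

    # Usage sketch; the article-posting URL is a hypothetical placeholder:
    # session = session_from_cookies()
    # session.post('https://m.example.cn/api/post_article',
    #              data={'title': '...', 'content': '...'})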