Using Scrapy, I want to simulate logging in to a website, but the site uses slider-alignment captcha verification. What can I do to make the simulated login succeed?

This is the core code of my simulated login:

# Imports used by the snippets below.
import time

import scrapy
from selenium import webdriver


def __init__(self):
        # Copy PhantomJS's default capabilities so they can be customized.
        dcap = dict(webdriver.DesiredCapabilities.PHANTOMJS)  # userAgent
        # dcap[
        #     "phantomjs.page.settings.userAgent"] = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:25.0) Gecko/20100101 Firefox/25.0"
        self.driver = webdriver.PhantomJS(
            executable_path="C:\\Users\\gt\\Desktop\\tutorial\\phantomjs.exe",
            desired_capabilities=dcap)

        self.driver.maximize_window()
def start_requests(self):
        print("start request!!!")
        yield scrapy.Request(self.login_url, callback=self.parse)

def parse(self, response):
        print("parse!!!")

        self.driver.get(response.url)
        self.set_sleep_time()
        # print(self.driver.page_source)
        # Click the left tab to switch to phone login.
        self.driver.find_element_by_xpath("//*[@id='web-content']/div/div[2]/div/div[2]/div/div[3]/div[1]/div[1]").click()
        print("CLICK LEFT")
        time.sleep(1)
        temp = self.driver.find_element_by_xpath("//*[@id='web-content']/div/div[2]/div/div[2]/div/div[3]/div[3]/div[2]/input")
        temp.click()
        temp.send_keys(PHONE)
        print("PHONE SENT")
        self.driver.find_element_by_xpath("//*[@id='web-content']/div/div[2]/div/div[2]/div/div[3]/div[1]/div[2]").click()
        print("CLICK RIGHT")
        time.sleep(5)
        temp2 = self.driver.find_element_by_xpath("//*[@id='web-content']/div/div[2]/div/div[2]/div/div[3]/div[2]/div[3]/input")
        temp2.click()
        temp2.send_keys(PASSWORD)
        print("PASSWORD SENT")
        # Click the login button.
        self.driver.find_element_by_xpath("//*[@id='web-content']/div/div[2]/div/div[2]/div/div[3]/div[2]/div[5]").click()
        self.set_sleep_time()
        time.sleep(3)
        # print(self.driver.page_source)
        print("")
        cookies = self.driver.get_cookies()
        # print(cookies)

        # Read the target URLs and re-issue them through Scrapy, carrying
        # over the cookies obtained from the Selenium session.
        with open("data/url_list.txt", mode="r", encoding="utf-8") as f:
            for line in f.readlines():
                url = str(line.replace("\r", "").replace("\n", "").replace("=", ""))
                print(url)
                time.sleep(1)
                print("1...............")
                request = scrapy.Request(url, cookies=cookies,
                                         callback=self.sub_parse)
                yield request
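
A side note on the fixed time.sleep() calls in parse(): Selenium's explicit waits poll until a condition actually holds, which is usually more reliable than sleeping for a guessed interval. A minimal sketch, reusing the password-input XPath from the code above (the 10-second timeout is an assumption):

    from selenium.webdriver.common.by import By
    from selenium.webdriver.support import expected_conditions as EC
    from selenium.webdriver.support.ui import WebDriverWait

    # Poll for up to 10 seconds until the password input is clickable,
    # then type into it; this replaces the fixed time.sleep(5) above.
    wait = WebDriverWait(self.driver, 10)
    temp2 = wait.until(EC.element_to_be_clickable(
        (By.XPATH, "//*[@id='web-content']/div/div[2]/div/div[2]/div/div[3]/div[2]/div[3]/input")))
    temp2.send_keys(PASSWORD)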

One workable approach: pause the crawler when the slider appears, perform the slide verification manually in the browser, and let the crawler continue once the verification is done. Sketches of that pause-and-wait pattern, and of an automated drag, follow.
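
Note that PhantomJS is headless, so there is no window in which to drag the slider by hand; the manual approach needs a visible browser. A minimal sketch of the pause-and-wait pattern, assuming Chrome with ChromeDriver on the PATH (LOGIN_URL stands in for your self.login_url):

    from selenium import webdriver

    driver = webdriver.Chrome()  # visible browser so the slider can be solved by hand
    driver.get(LOGIN_URL)

    # ... fill in phone and password as in parse() ...

    # Block the spider until the slider captcha has been solved manually.
    input("Solve the slider in the browser window, then press Enter...")

    cookies = driver.get_cookies()  # reuse these in scrapy.Request as above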
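
If you would rather automate the slide itself, Selenium's ActionChains can drag the knob. This is only a sketch: the knob selector and the pixel distance are hypothetical and depend entirely on the target site, and many slider captchas also score the drag trajectory, so a single uniform movement is often rejected:

    from selenium.webdriver import ActionChains

    distance = 180  # assumed offset; a real site requires computing the gap position
    knob = driver.find_element_by_xpath("//div[@class='slider-knob']")  # hypothetical selector

    # Press, drag horizontally in small steps (more human-like than one jump), release.
    actions = ActionChains(driver)
    actions.click_and_hold(knob)
    for _ in range(10):
        actions.move_by_offset(distance // 10, 0)
    actions.release()
    actions.perform()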
