16 changes: 15 additions & 1 deletion cfscrape/__init__.py
@@ -168,7 +168,7 @@ def create_scraper(cls, sess=None, **kwargs):
    ## Functions for integrating cloudflare-scrape with other applications and scripts

    @classmethod
    def get_tokens(cls, url, user_agent=None, **kwargs):
    def get_tokens(cls, url, user_agent=None, curl_cookie_file=None, **kwargs):
        scraper = cls.create_scraper()
        if user_agent:
            scraper.headers["User-Agent"] = user_agent
@@ -189,6 +189,20 @@ def get_tokens(cls, url, user_agent=None, **kwargs):
                break
        else:
            raise ValueError("Unable to find Cloudflare cookies. Does the site actually have Cloudflare IUAM (\"I'm Under Attack Mode\") enabled?")

        if curl_cookie_file:
            curl_cookies = []
            for cookie in scraper.cookies:
                cookie_dict = cookie.__dict__
                if cookie_dict['domain'] == cookie_domain:
                    # Netscape/curl cookie jar line: http://www.cookiecentral.com/faq/#3.5
                    # Fields: domain, include-subdomains flag, path, secure, expiry, name, value
                    curl_cookies.append('\t'.join([
                        cookie_dict['domain'], 'TRUE', cookie_dict['path'],
                        'TRUE' if cookie_dict['secure'] else 'FALSE',
                        str(cookie_dict['expires']), cookie_dict['name'], cookie_dict['value']
                    ]))
            with open(curl_cookie_file, "w") as text_file:
                text_file.write('\n'.join(curl_cookies) + '\n')

        return ({
            "__cfduid": scraper.cookies.get("__cfduid", "", domain=cookie_domain),
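For reference, a minimal sketch of how the new curl_cookie_file parameter could be used; the target URL and the cookies.txt filename are placeholder assumptions, not part of this change:

    import subprocess
    import cfscrape

    # Fetch the Cloudflare tokens and write them to a Netscape-format cookie jar.
    tokens, user_agent = cfscrape.get_tokens("https://example.com", curl_cookie_file="cookies.txt")

    # curl reads Netscape-format cookie files directly via -b; reuse the same User-Agent.
    subprocess.run(["curl", "-b", "cookies.txt", "-A", user_agent, "https://example.com"])

Because the file follows the standard Netscape cookie jar layout, it should also work with other tools (e.g. wget --load-cookies) without conversion.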