I'm trying to scrape this XML page for links by keywords, but urllib2 is throwing errors that I can't fix on Python 3...
from bs4 import BeautifulSoup
import requests
import smtplib
import urllib2
from lxml import etree

url = 'https://store.fabspy.com/sitemap_products_1.xml?from=5619742598&to=9172987078'
hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
       'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
       'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
       'Accept-Encoding': 'none',
       'Accept-Language': 'en-US,en;q=0.8',
       'Connection': 'keep-alive'}
proxies = {'https': '209.212.253.44'}

req = urllib2.Request(url, headers=hdr, proxies=proxies)
try:
    page = urllib2.urlopen(req)
except urllib2.HTTPError as e:
    print(e.fp.read())
content = page.read()

def parse(self, response):
    try:
        print(response.status)
        print('???????????????????????????????????')
        if response.status == 200:
            self.driver.implicitly_wait(5)
            self.driver.get(response.url)
            print(response.url)
            print('!!!!!!!!!!!!!!!!!!!!')
            # DO STUFF
    except httplib.BadStatusLine:
        pass

while True:
    soup = BeautifulSoup(a.context, 'lxml')
    links = soup.find_all('loc')
    for link in links:
        if 'notonesite' and 'winter' in link.text:
            print(link.text)
            jake = link.text
I am simply trying to send a urllib request through the proxy to see if the link is on the sitemap...
urllib2 is not available in Python 3. You should be using urllib.error and urllib.request instead:
import urllib.request
import urllib.error
...
req = urllib.request.Request(url, headers=hdr)  # doesn't take a proxies argument though...
...
try:
    page = urllib.request.urlopen(req)
except urllib.error.HTTPError as e:
    ...
...and so on. Notice, however, that urllib.request.Request() does not take a proxies argument; proxy handling is done with urllib.request.ProxyHandler and urllib.request.build_opener(), as in the sketch below (or see the documentation).