import scrapy

# This example needs the scrapyjs package: pip install scrapyjs
# NOTE(review): scrapyjs has been superseded by scrapy-splash — consider migrating.
# It also needs a Splash instance running in your env or on Scrapy Cloud
# (https://github.com/scrapinghub/splash)


class SplashSpider(scrapy.Spider):
    """Scrape quotes from the JavaScript-rendered page at quotes.toscrape.com/js.

    Requests are routed through a Splash instance (via the 'splash' meta key)
    so the JavaScript on the page is executed before parsing.
    """

    name = 'splash-spider'
    download_delay = 3  # seconds between requests, to avoid hammering the server

    def start_requests(self):
        """Yield the initial request, rendered through Splash's render.html endpoint."""
        yield scrapy.Request(
            'http://quotes.toscrape.com/js',
            self.parse,
            meta={
                'splash': {
                    'endpoint': 'render.html',
                }
            },
        )

    def parse(self, response):
        """Yield one dict per quote with its text, author, and tags.

        :param response: the Splash-rendered page response.
        """
        # Fixed: original used the Python 2 statement `print response.body`,
        # which is a SyntaxError under Python 3.
        print(response.body)
        for quote in response.css('.quote'):
            yield {
                'text': quote.css('span::text').extract_first(),
                'author': quote.css('small::text').extract_first(),
                'tags': quote.css('.tags a::text').extract(),
            }