NodeJS crawler

  node.js, question

Recently I was reading "The Great NodeJS", which contains an example that crawls Twitter data. Since Twitter now seems to require authentication to fetch JSON data and I don't know how to set that up, I modified the code to look like the following — but it throws the error below when it runs, and I don't understand what is going on. Thanks in advance, everyone. (I am using a VPN, so I can already reach Twitter from my network.)

events.js:141
 throw er;  // Unhandled 'error' event
 ^
 
 Error: connect ETIMEDOUT  104.244.42.65:80
 at Object.exports._errnoException (util.js:907:11)
 at exports._exceptionWithHostPort (util.js:930:20)
 at TCPConnectWrap.afterConnect [as oncomplete] (net.js:1077:14)

The code is as follows:

var qs = require('querystring'),
 https = require('https');
 
 //node tweets.js haha   =>   process.argv = ['node','tweets.js','haha']
 var search = process.argv.slice(2).join(' ').trim()
 if(!  search.length){
 Return console.log ('\ nusage: nodetweets < searchterm > \ n')//use return to avoid executing the following code
 bracket
 
 var option = {
 host:"twitter.com",
 path:'/search?'  Plus qs.stringify({q:search}),
 method:"GET"
 bracket
 https.request(option,function(res){
 var body =''
 res.setEncoding('utf8')
 console.log('statusCode: ', res.statusCode);
 console.log('headers: ', res.headers);
 res.on('data',function (chunk) {
 Body plus =chunk
 })
 res.on('end',function () {
 // var obj = JSON.parse(body)
 console.log(body);
 })
 }).end()

Answer: try sending request headers (e.g. a `User-Agent`) with the request, and also attach an `'error'` listener to the request object — the crash is an unhandled `'error'` event from the failed TCP connection.