// This is a simple approach to downloading the data from our API
// and processing it as a JSON array. This method is only sufficient for
// small API responses (should work for responses below 100k results).
// If your response contains more results, try having a look at this Stack Overflow QA:
// http://stackoverflow.com/questions/15121584/how-to-parse-a-large-newline-delimited-json-file-by-jsonstream-module-in-node-j

// Configuration for the API request:
const extractorRunId = "00000000-0000-0000-0000-000000000000";
// You can get this value by displaying https://store.import.io/store/crawlrun/${extractorRunId}
// in the browser, and reading the value of the `json` field.
const jsonFieldId = "00000000-0000-0000-0000-000000000001";
// You can get it here https://import.io/data/account/
const apiKey = "YOUR_API_KEY";
const API_ENDPOINT = `https://store.import.io/store/crawlrun/${extractorRunId}/_attachment/json/${jsonFieldId}?_apikey=${apiKey}`;

/**
 * Parse a newline-delimited JSON (NDJSON) response body into an array.
 *
 * The previous `JSON.parse(`[${txt}]`)` only worked when the body held a
 * single record: with several newline-separated objects the commas between
 * them are missing and JSON.parse throws. Joining the non-empty lines with
 * commas handles both cases (a single-record body parses identically).
 *
 * @param {string} text - raw NDJSON response body
 * @returns {Array} parsed records
 * @throws {SyntaxError} if any line is not valid JSON
 */
function parseNdjson(text) {
  const records = text.split('\n').filter((line) => line.trim() !== '');
  return JSON.parse(`[${records.join(',')}]`);
}

// We are using the Fetch API, that is only available in newer browsers
// (details: https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API).
// Try this code in the newest Chrome Console.
fetch(API_ENDPOINT)
  .then((response) =>
    // Read the body first so a failure response can include it in the error;
    // keeping `response` in this scope avoids the shared mutable variable the
    // original threaded through several .then() callbacks.
    response.text().then((body) => {
      if (!response.ok) {
        // Throw a real Error (not the raw body string) so stack traces
        // and `instanceof Error` checks work for callers of .catch().
        throw new Error(`Request failed with status ${response.status}: ${body}`);
      }
      return body;
    })
  )
  .then((txt) => {
    const jsonArray = parseNdjson(txt);
    console.log('First ten or less results: ' + JSON.stringify(jsonArray.slice(0, 10)));
    console.log('Total number of results: ' + jsonArray.length);
  })
  .catch((error) => console.error(error));