Я пытаюсь сканировать страницы содержимого с помощью Apache HttpClient. Я получаю статус 200 при запросе следующей страницы по ссылке из нумерации страниц, но HTML показывает 500 в теле и без содержимого. Почтальон работает отлично и получает контент даже при использовании ссылок с нумерации страниц.
Основной класс
public static void main(String[] args) {
String url = "https://www.cochranelibrary.com/cdsr/reviews/topics";
MyContentFetcher myContentFetcher = new MyContentFetcher();
MyParser myParser = new MyParser();
try {
// Load Topic list page
String topicsPage = myContentFetcher.loadHTML(url);
// Getting all the topics.
Map<Integer, MyNode> topics = myParser.getTopicList(topicsPage);
// Print all the topics and ask user to choose one
for (int id : topics.keySet())
System.out.println("-> " + id + " <- " + topics.get(id).getTopic());
System.out.println("********************");
System.out.print("Enter ID number from the list above to get reviews or enter anything else to exit:\n");
BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
String id = reader.readLine();
// Validate user input, get the link and topic and cout the choice.
if (isNumber(id)) {
int idNum = Integer.parseInt(id);
if (idNum <= topics.size() && idNum > 0) {
String topic = topics.get(idNum).getTopic();
String link = topics.get(idNum).getLink();
System.out.println("You picked: " + topic + link + "\n***************************");
// Loading first page of reviews
myParser.loadReviews(myContentFetcher.loadHTML(link), topic);
// Getting links to other pages
Queue<String> paginationLinks = myParser.getLinks();
// --------------> WORKS FINE UNTIL HERE <--------------
// Problem starts here....
// Load list of reviews for chosen topic
while(!paginationLinks.isEmpty()) {
String page = myContentFetcher.loadHTML(paginationLinks.remove());
myParser.loadReviews(page, topic);
}
}
}
System.out.println("Exiting...");
} catch (IOException e) {
System.out.println("There was a problem...");
}
!!!! Вот класс, который выбирает HTML. Я, наверное, здесь что-то не так делаю ...
import org.apache.http.client.config.CookieSpecs;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import java.io.IOException;
import java.util.Scanner;
public class MyContentFetcher {
public MyContentFetcher() {
}
String loadHTML(String url) throws IOException {
// Create configurations for
RequestConfig config = RequestConfig.custom()
.setCircularRedirectsAllowed(true)
.setCookieSpec(CookieSpecs.STANDARD)
.build();
// Creating a HttpClient object
CloseableHttpClient httpClient = HttpClients.custom()
.setDefaultRequestConfig(config)
.build();
// Creating a HttpGet object
HttpGet httpget = new HttpGet(url);
httpget.setHeader("User-Agent", "Mozilla/5.0 (Linux; Android 8.1.0; Pixel Build/OPM4.171019.021.D1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.109 Mobile Safari/537.36 EdgA/42.0.0.2057");
CloseableHttpResponse httpResponse = httpClient.execute(httpget);
Scanner sc = new Scanner(httpResponse.getEntity().getContent());
StringBuilder page = new StringBuilder("");
while(sc.hasNext())
page.append(sc.nextLine()).append(" ");
httpResponse.close();
httpClient.close();
return page.toString();
}
}
Вот парсер. С парсером проблем не возникает (разбирается отлично и при необходимости)
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import java.io.IOException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.*;
public class MyParser {
private Map<String, String> topics;
private Document htmlPage;
private Element reviewBlock;
public MyParser(){}
// Loads all topics from the Cochrane Library into a map -> (Topic Name, Link)
public Map<Integer, MyNode> getTopicList(String page) {
Map<Integer, MyNode> topics= new HashMap<Integer, MyNode>();
htmlPage = Jsoup.parse(page);
// Get 'a' element that is inside 'li' with a class name of browse-by-list-item
int i = 1;
MyNode info;
for(Element element : htmlPage.body().select("li.browse-by-list-item > a")) {
info = new MyNode(element.select("button").text(),
element.select("a").attr("href").trim());
topics.put(i, info);
i++;
}
return topics;
}
// Loads Reviews
public void loadReviews(String page, String topic) throws IOException {
htmlPage = Jsoup.parse(page);
// Get all review blocks
System.out.println("**************************\n" + page + "\n**************************\n");
for(Element element : htmlPage.body().select(".search-results-item-body")){
reviewBlock = element;
String review = getLink() + " | " + topic + " | " + getTitle() + " | " + getAuthor() + " | " + getDate();
System.out.println(review);
}
}
Queue<String> getLinks(){
System.out.println("GETTING LINKS");
Queue<String> links = new LinkedList<>();
for(Element element : htmlPage.body().select("li.pagination-page-list-item > a")) {
links.add(element.attr("href"));
}
return links;
}
private String getLink(){
return "https://www.cochranelibrary.com" + reviewBlock.select("a").attr("href");
}
public String getTitle(){
return reviewBlock.selectFirst("a").text();
}
public String getAuthor(){
return reviewBlock.selectFirst("div.search-result-authors").text();
}
public String getDate(){
String result = reviewBlock.select("div.search-result-date > div").text();
try {
SimpleDateFormat fmt = new SimpleDateFormat("dd MMMM yyyy", Locale.US);
Date d = fmt.parse(result);
fmt.applyPattern("yyyy-MM-dd");
result = fmt.format(d);
} catch (ParseException e) {
System.out.println("Failed parsing the date...");
}
return result;
}