spider.php
<?php
// It may take a while to crawl a site ...
set_time_limit(10000);
// Include the PHPCrawl main class
require_once("PHPCrawl/libs/PHPCrawler.class.php");
// Include the Elasticsearch client library (via the Composer autoloader)
require_once('vendor/autoload.php');
// Include the PHP Simple HTML DOM parser
require_once('simple_html_dom.php');
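// Create the Elasticsearch client; with no arguments it expects a node
// on localhost:9200 (the library default).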
$client = new Elasticsearch\Client();
// Extend the class and override the handleDocumentInfo()-method
class MyCrawler extends PHPCrawler
{
function handleDocumentInfo($DocInfo)
{
// Just detect the linebreak for output ("\n" in CLI mode, otherwise "<br />").
if (PHP_SAPI == "cli") $lb = "\n";
else $lb = "<br />";
// Print the URL and the HTTP-status-Code
echo "Page requested: ".$DocInfo->url." (".$DocInfo->http_status_code.")".$lb;
// Print the referring URL
echo "Referer-page: ".$DocInfo->referer_url.$lb;
// Print whether the content of the document was received or not
if ($DocInfo->received == true){
global $client;
echo "Content received: ".$DocInfo->bytes_received." bytes".$lb;
$params = array();
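// The crawled site is served in Big5, so the page content and URL are
// converted to UTF-8 before parsing and indexing (this assumes every page is Big5-encoded).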
$html = str_get_html(iconv("big5","UTF-8",$DocInfo->content));
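// Join all text nodes of the page into one plain-text string for full-text search.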
$params['body'] = array('content' => implode(" ", $html->find('text')),
'url' => iconv("big5","UTF-8",$DocInfo->url)
);
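// Index into the 'page' index under type 'tw'; the MD5 hash of the URL is used
// as the document id, so re-crawling a page updates the existing document
// instead of creating a duplicate.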
$params['index'] = 'page';
$params['type'] = 'tw';
$params['id'] = hash("md5",iconv("big5","UTF-8",$DocInfo->url));
$ret = $client->index($params);
echo "indexed: ".var_dump($ret);
}
else
echo "Content not received".$lb;
// The raw source of the page is also available via $DocInfo->source
// if further processing is needed; it is not used here.
echo $lb;
flush();
}
}
// Now, create an instance of your class, define the behaviour
// of the crawler (see the class reference for more options and details)
// and start the crawling process.
$crawler = new MyCrawler();
// URL to crawl
$crawler->setURL("http://www.rclaw.com.tw/");
// Only receive content of files with content-type "text/html"
$crawler->addContentTypeReceiveRule("#text/html#");
// Ignore links to images; don't even request them
$crawler->addURLFilterRule("#\.(jpg|jpeg|gif|png)$# i");
// Store and send cookie-data like a browser does
$crawler->enableCookieHandling(true);
// Set the traffic limit to 1 MB (in bytes;
// for testing we don't want to "suck" down the whole site)
$crawler->setTrafficLimit(1000 * 1024);
// That's enough, now here we go
$crawler->go();
// At the end, after the process is finished, we print a short
// report (see method getProcessReport() for more information)
$report = $crawler->getProcessReport();
if (PHP_SAPI == "cli") $lb = "\n";
else $lb = "<br />";
echo "Summary:".$lb;
echo "Links followed: ".$report->links_followed.$lb;
echo "Documents received: ".$report->files_received.$lb;
echo "Bytes received: ".$report->bytes_received." bytes".$lb;
echo "Process runtime: ".$report->process_runtime." sec".$lb;
?>