In practice, a crawler involves just three main steps:
1 Define the target (know which site or range of pages you want to search)
2 Crawl (fetch all the content from those pages)
3 Process the data (store and use it the way we want)
Example target: Baidu Tieba
https://tieba.baidu.com/f?kw=%E7%BB%9D%E5%9C%B0%E6%B1%82%E7%94%9F&ie=utf-8&pn=50 ("next page" adds 50 to the pn parameter)
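So page 1 maps to pn=0, page 2 to pn=50, and so on: pn = (page-1)*50. A minimal sketch of that mapping (the helper name pageURL is our own, introduced only for illustration):

package main

import (
    "fmt"
    "strconv"
)

// pageURL builds the Tieba list URL for a 1-based page number:
// page 1 -> pn=0, page 2 -> pn=50, page 3 -> pn=100, ...
func pageURL(page int) string {
    return "https://tieba.baidu.com/f?kw=%E7%BB%9D%E5%9C%B0%E6%B1%82%E7%94%9F&ie=utf-8&pn=" +
        strconv.Itoa((page-1)*50)
}

func main() {
    for i := 1; i <= 3; i++ {
        fmt.Println(pageURL(i))
    }
}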
package main
import (
    "fmt"
    "io"
    "net/http"
    "os"
    "strconv"
)
func HttpGet(url string) (result string, err error) {
    resp, err1 := http.Get(url)
    if err1 != nil {
        err = err1
        return
    }
    defer resp.Body.Close()

    // Read the response body in 4 KB chunks.
    buf := make([]byte, 1024*4)
    for {
        n, err2 := resp.Body.Read(buf)
        if n > 0 {
            result += string(buf[:n])
        }
        if err2 != nil { // io.EOF marks the end of the body; anything else is a real error
            if err2 != io.EOF {
                err = err2
            }
            break
        }
    }
    return
}
func DoWork(start, end int) {
    fmt.Printf("Crawling pages %d through %d\n", start, end)
    // 1. Define the target (know which site or range of pages to search).
    // https://tieba.baidu.com/f?kw=%E7%BB%9D%E5%9C%B0%E6%B1%82%E7%94%9F&ie=utf-8&pn=0 ("next page" adds 50 to pn)
    for i := start; i <= end; i++ {
        url := "https://tieba.baidu.com/f?kw=%E7%BB%9D%E5%9C%B0%E6%B1%82%E7%94%9F&ie=utf-8&pn=" + strconv.Itoa((i-1)*50) // strconv.Itoa converts the int to its decimal string; Go does no implicit type conversions, unlike weakly typed languages such as PHP
fmt.Println("url = ", url)
// 2. Crawl (fetch the page content).
result, err := HttpGet(url)
if err != nil {
fmt.Println("HttpGet err = ", err)
continue
}
// Write the content to a file.
fileName := strconv.Itoa(i) + ".html"
f, err1 := os.Create(fileName)
if err1 != nil {
fmt.Println("os.Create = err1", err1)
continue
}
f.WriteString(result) // write the page HTML
f.Close()
}
}
func main() {
var start, end int
fmt.Printf("请输入起始页( >= 1) :")
fmt.Scan(&start)
fmt.Printf("请输入终止页(>= 起始页)")
fmt.Scan(&end)
DoWork(start, end)
}
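As a side note, the chunked read loop in HttpGet works, but since Go 1.16 the standard library's io.ReadAll is the usual way to drain a response body. Below is a minimal sketch of that variant, a drop-in alternative to the HttpGet above rather than what the original code uses:

package main

import (
    "fmt"
    "io"
    "net/http"
)

// HttpGet fetches url and returns the whole response body as a string.
func HttpGet(url string) (string, error) {
    resp, err := http.Get(url)
    if err != nil {
        return "", err
    }
    defer resp.Body.Close()

    body, err := io.ReadAll(resp.Body) // reads until EOF in one call
    if err != nil {
        return "", err
    }
    return string(body), nil
}

func main() {
    result, err := HttpGet("https://tieba.baidu.com/f?kw=%E7%BB%9D%E5%9C%B0%E6%B1%82%E7%94%9F&ie=utf-8&pn=0")
    if err != nil {
        fmt.Println("HttpGet err = ", err)
        return
    }
    fmt.Println("fetched", len(result), "bytes")
}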
Concurrent version of the crawler
package main
import (
    "fmt"
    "io"
    "net/http"
    "os"
    "strconv"
)
func HttpGet(url string) (result string, err error) {
    resp, err1 := http.Get(url)
    if err1 != nil {
        err = err1
        return
    }
    defer resp.Body.Close()

    // Read the response body in 4 KB chunks.
    buf := make([]byte, 1024*4)
    for {
        n, err2 := resp.Body.Read(buf)
        if n > 0 {
            result += string(buf[:n])
        }
        if err2 != nil { // io.EOF marks the end of the body; anything else is a real error
            if err2 != io.EOF {
                err = err2
            }
            break
        }
    }
    return
}
// SpiderPage crawls one page and saves it to <i>.html.
func SpiderPage(i int, page chan int) {
    // Always report back, even on an early return, so the receive
    // loop in DoWork never blocks waiting for a failed page.
    defer func() { page <- i }()

    url := "https://tieba.baidu.com/f?kw=%E7%BB%9D%E5%9C%B0%E6%B1%82%E7%94%9F&ie=utf-8&pn=" + strconv.Itoa((i-1)*50) // Go does no implicit type conversions, unlike PHP
    fmt.Printf("Crawling page %d: %s\n", i, url)
    // 2. Crawl (fetch the page content).
    result, err := HttpGet(url)
    if err != nil {
        fmt.Println("HttpGet err = ", err)
        return
    }
    // Write the content to a file.
    fileName := strconv.Itoa(i) + ".html"
    f, err1 := os.Create(fileName)
    if err1 != nil {
        fmt.Println("os.Create err = ", err1)
        return
    }
    f.WriteString(result) // write the page HTML
    f.Close()
}
func DoWork(start, end int) {
    fmt.Printf("Crawling pages %d through %d\n", start, end)
    page := make(chan int) // channel for per-page completion signals
    // 1. Define the target (know which site or range of pages to search).
    // https://tieba.baidu.com/f?kw=%E7%BB%9D%E5%9C%B0%E6%B1%82%E7%94%9F&ie=utf-8&pn=0 ("next page" adds 50 to pn)
    for i := start; i <= end; i++ {
        go SpiderPage(i, page)
    }
    // Receive one signal per page; this keeps the main goroutine alive
    // until every child goroutine has finished, instead of exiting early.
    for i := start; i <= end; i++ {
        fmt.Printf("Page %d finished\n", <-page)
    }
}
}
func main() {
var start, end int
fmt.Printf("请输入起始页( >= 1) :")
fmt.Scan(&start)
fmt.Printf("请输入终止页(>= 起始页)")
fmt.Scan(&end)
DoWork(start, end)
}
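The receive loop in DoWork doubles as synchronization: main blocks until every goroutine has sent its page number. A common alternative is sync.WaitGroup. The sketch below is our own restructuring, not the original code; spiderPage is a stand-in stub that only illustrates the hand-off, with the real fetch-and-save logic elided:

package main

import (
    "fmt"
    "sync"
)

// spiderPage stands in for the real SpiderPage above; it only
// demonstrates how a worker signals completion via the WaitGroup.
func spiderPage(i int, wg *sync.WaitGroup) {
    defer wg.Done() // mark this page as finished, even on an early return
    fmt.Printf("Crawling page %d\n", i)
    // ... fetch the page and write <i>.html, as in SpiderPage ...
}

func DoWork(start, end int) {
    var wg sync.WaitGroup
    for i := start; i <= end; i++ {
        wg.Add(1) // register one unit of work before starting the goroutine
        go spiderPage(i, &wg)
    }
    wg.Wait() // block until every goroutine has called Done
    fmt.Println("All pages finished")
}

func main() {
    DoWork(1, 3)
}

The trade-off: a WaitGroup is simpler when you only need to wait, while the channel version also carries data back (which page finished), which is why the original prints per-page progress from the receive loop.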