-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathGoogleImageCrawler.py
79 lines (63 loc) · 1.72 KB
/
GoogleImageCrawler.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
import sys, os
from bs4 import BeautifulSoup
from selenium import webdriver
import urllib, urllib.request
import requests
import random
import time
from selenium.webdriver.common.keys import Keys
### Configuration: output root, chromedriver location, query term, page size.
folder = "./image/"
webDriver = "D:/chromedriver_win32/chromedriver.exe"
searchItem = "정면사진"
size = 20

# Google Images search parameters; "tbm": "isch" selects image results.
params = {
    "q": searchItem,
    "tbm": "isch",
    "sa": "1",
    "source": "lnms&tbm=isch",
}
# Final search URL with the query string encoded.
url = f"https://www.google.com/search?{urllib.parse.urlencode(params)}"
# Launch Chrome through the local chromedriver binary and load the search URL.
# NOTE(review): passing the driver path positionally targets an older Selenium
# API (matches the find_element_by_* calls below) — confirm installed version.
browser = webdriver.Chrome(webDriver)
time.sleep(0.5)
browser.get(url)
# Snapshot of the initially rendered DOM (before any scrolling happens).
html = browser.page_source
time.sleep(0.5)
# NOTE(review): the triple-quoted block below is disabled scroll-to-load code,
# kept for reference — it would PAGE_DOWN until roughly size*10 images render.
'''
### get number of image for a page
soup_temp = BeautifulSoup(html,'html.parser')
img4page = len(soup_temp.findAll("img"))
### page down
elem = browser.find_element_by_tag_name("body")
imgCnt =0
while imgCnt < size*10:
elem.send_keys(Keys.PAGE_DOWN)
rnd = random.random()
print(imgCnt)
time.sleep(rnd)
imgCnt+=img4page
'''
# Re-read the DOM and collect lazy-load image URLs from every <img> tag.
html = browser.page_source
soup = BeautifulSoup(html, 'html.parser')
img = soup.findAll("img")
fileNum = 0
srcURL = []
for line in img:
    # Heuristic filter: keep tags that carry a data-src attribute and whose
    # 'http' (if present) appears within the first 100 chars of the tag text.
    # Note find() returns -1 when 'http' is absent, which also passes the
    # `< 100` test — presumably intended to skip base64 data URIs.
    if str(line).find('data-src') != -1 and str(line).find('http') < 100:
        print(fileNum, " : ", line['data-src'])
        srcURL.append(line['data-src'])
        fileNum += 1
### make folder and save picture in that directory
# Download each collected URL into ./image/<searchItem>/<index>.jpg
saveDir = folder + searchItem
try:
    # exist_ok=True makes an already-existing directory a no-op, eliminating
    # the isdir()/makedirs() race and the original code's reference to the
    # never-imported `errno` module (which raised NameError on any OSError).
    os.makedirs(saveDir, exist_ok=True)
except OSError:
    # Genuine failure (permissions, bad path): report and propagate.
    print("Failed to create directory!!!!!")
    raise
# fileNum always equals len(srcURL), so enumerate replaces zip(range(...), ...).
for i, src in enumerate(srcURL):
    urllib.request.urlretrieve(src, saveDir + "/" + str(i) + ".jpg")
    print(i, "saved")
# Release the WebDriver session (the original script leaked the browser).
browser.quit()