程序生成一张素描图。让自己也是一个素描“大师”。废话不多说,直接先来看看效果吧。
1. 流程分析
2. 具体实现
安装所需要的库:
- pip install opencv-python
导入所需要的库:
- import cv2
编写主体代码也是非常的简单的,代码如下:
# Pencil-sketch pipeline: grayscale -> Gaussian blur -> "color dodge" via divide.
import cv2

SRC = 'images/image_1.jpg'

gray = cv2.cvtColor(cv2.imread(SRC), cv2.COLOR_BGR2GRAY)
# A large blur kernel produces the soft pencil-like shading.
blurred = cv2.GaussianBlur(gray, ksize=(21, 21), sigmaX=0, sigmaY=0)
# Dividing gray by its blur brightens flat areas and keeps edges dark.
cv2.imwrite('result.jpg', cv2.divide(gray, blurred, scale=255))
上面的代码其实并不难,接下来为了能更好的理解,编写如下代码:
- """
- project = 'Code', file_name = 'study.py', author = ''
- time = '2020/5/19 8:35', product_name = PyCharm
- code is far away from bugs with the god animal protecting
- I love animals. They taste delicious.
- """
- import cv2
-
- # 原图路径
- SRC = 'images/image_1.jpg'
-
- # 读取图片
- image_rgb = cv2.imread(SRC)
- # cv2.imshow('rgb', image_rgb) # 原图
- # cv2.waitKey(0)
- # exit()
- image_gray = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2GRAY)
- # cv2.imshow('gray', image_gray) # 灰度图
- # cv2.waitKey(0)
- # exit()
- image_bulr = cv2.GaussianBlur(image_gray, ksize=(21, 21), sigmaX=0, sigmaY=0)
- cv2.imshow('image_blur', image_bulr) # 高斯虚化
- cv2.waitKey(0)
- exit()
-
- # divide: 提取两张差别较大的线条和内容
- image_blend = cv2.divide(image_gray, image_bulr, scale=255)
- # cv2.imshow('image_blend', image_blend) # 素描
- cv2.waitKey(0)
- # cv2.imwrite('result1.jpg', image_blend)
上面的代码是在原有的基础上添加了一些实时展示的代码来理解,其实有人会问,我用软件不就可以直接生成素描图吗?那程序的好处是什么?程序的好处就是如果你的图片量多的话,这个时候使用程序批量生成也是非常方便高效的。
3. 百度图片爬虫+生成素描图
不过,这还不是我们的海量图片,为了达到海量这个词,写了一个百度图片爬虫,不过本文不是教如何写爬虫代码的,这里直接放出爬虫代码,符合软件工程规范:
- # Crawler.Spider.py
- import re
- import os
- import time
- import collections
- from collections import namedtuple
-
- import requests
- from concurrent import futures
- from tqdm import tqdm
- from enum import Enum
-
# Baidu image-search JSON endpoint; {keyword} and {page} are filled per request.
# NOTE(fix): the pasted text contained "latest=©right=" — forum software had
# converted the HTML entity "&copy" into "©"; restored to "&copyright=" here
# and in the Referer header below.
BASE_URL = 'https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord={keyword}&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=&hd=&latest=&copyright=&word={keyword}&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&expermode=&force=&pn={page}&rn=30&gsm=&1568638554041='

# Request headers that make the crawler look like a normal AJAX request
# from the Baidu image-search page.
HEADERS = {
    'Referer': 'http://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fr=&sf=1&fmq=1567133149621_R&pv=&ic=0&nc=1&z=0&hd=0&latest=0&copyright=0&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&sid=&word=%E5%A3%81%E7%BA%B8',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest', }
-
-
- class BaiDuSpider:
- def __init__(self, max_works, images_type):
- self.max_works = max_works
- self.HTTPStatus = Enum('Status', ['ok', 'not_found', 'error'])
- self.result = namedtuple('Result', 'status data')
- self.session = requests.session()
- self.img_type = images_type
- self.img_num = None
- self.headers = HEADERS
- self.index = 1
-
- def get_img(self, img_url):
- res = self.session.get(img_url)
- if res.status_code != 200:
- res.raise_for_status()
-
- return res.content
-
- def download_one(self, img_url, verbose):
- try:
- image = self.get_img(img_url)
- except requests.exceptions.HTTPError as e:
- res = e.response
- if res.status_code == 404:
- status = self.HTTPStatus.not_found
- msg = 'not_found'
- else:
- raise
- else:
- self.save_img(self.img_type, image)
- status = self.HTTPStatus.ok
- msg = 'ok'
-
- if verbose:
- print(img_url, msg)
-
- return self.result(status, msg)
-
- def get_img_url(self):
- urls = [BASE_URL.format(keyword=self.img_type, page=page) for page in self.img_num]
- for url in urls:
- res = self.session.get(url, headers=self.headers)
- if res.status_code == 200:
- img_list = re.findall(r'"thumbURL":"(.*?)"', res.text)
- # 返回出图片地址,配合其他函数运行
- yield {img_url for img_url in img_list}
- elif res.status_code == 404:
- print('-----访问失败,找不到资源-----')
- yield None
- elif res.status_code == 403:
- print('*****访问失败,服务器拒绝访问*****')
- yield None
- else:
- print('>>> 网络连接失败 <<<')
- yield None
-
- def download_many(self, img_url_set, verbose=False):
- if img_url_set:
- counter = collections.Counter()
- with futures.ThreadPoolExecutor(self.max_works) as executor:
- to_do_map = {}
- for img in img_url_set:
- future = executor.submit(self.download_one, img, verbose)
- to_do_map[future] = img
- done_iter = futures.as_completed(to_do_map)
-
- if not verbose:
- done_iter = tqdm(done_iter, total=len(img_url_set))
- for future in done_iter:
- try:
- res = future.result()
- except requests.exceptions.HTTPError as e:
- error_msg = 'HTTP error {res.status_code} - {res.reason}'
- error_msg = error_msg.format(res=e.response)
- except requests.exceptions.ConnectionError:
- error_msg = 'ConnectionError error'
- else:
- error_msg = ''
- status = res.status
-
- if error_msg:
- status = self.HTTPStatus.error
-
- counter[status] += 1
-
- if verbose and error_msg:
- img = to_do_map[future]
- print('***Error for {} : {}'.format(img, error_msg))
- return counter
- else:
- pass
-
- def save_img(self, img_type, image):
- with open('{}/{}.jpg'.format(img_type, self.index), 'wb') as f:
- f.write(image)
- self.index += 1
-
- def what_want2download(self):
- # self.img_type = input('请输入你想下载的图片类型,什么都可以哦~ >>> ')
|