QQ登录

只需要一步,快速开始

APP扫码登录

只需要一步,快速开始

手机号码,快捷登录

手机号码,快捷登录

查看: 3600|回复: 0

[Python] Python使用5行代码批量做小姐姐的素描图

[复制链接]

等级头衔

积分成就    金币 : 2841
   泡泡 : 1516
   精华 : 6
   在线时间 : 1294 小时
   最后登录 : 2024-11-21

丰功伟绩

优秀达人突出贡献荣誉管理论坛元老

联系方式
发表于 2021-7-14 22:51:40 | 显示全部楼层 |阅读模式
       程序生成一张素描图。让自己也是一个素描“大师”。废话不多说,直接先来看看效果吧。
1.jpg 2.jpg
1. 流程分析
3.jpg
2. 具体实现
安装所需要的库:
  1. pip install opencv-python
导入所需要的库:
  1. import cv2
编写主体代码也是非常的简单的,代码如下:
  1. import cv2
  2. SRC = 'images/image_1.jpg'
  3. image_rgb = cv2.imread(SRC)
  4. image_gray = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2GRAY)
  5. image_blur = cv2.GaussianBlur(image_gray, ksize=(21, 21), sigmaX=0, sigmaY=0)
  6. image_blend = cv2.divide(image_gray, image_blur, scale=255)
  7. cv2.imwrite('result.jpg', image_blend)
上面的代码其实并不难,接下来为了能更好的理解,编写如下代码:
  1. """
  2. project = 'Code', file_name = 'study.py', author = ''
  3. time = '2020/5/19 8:35', product_name = PyCharm
  4. code is far away from bugs with the god animal protecting
  5.     I love animals. They taste delicious.
  6. """
  7. import cv2
  8. # 原图路径
  9. SRC = 'images/image_1.jpg'
  10. # 读取图片
  11. image_rgb = cv2.imread(SRC)
  12. # cv2.imshow('rgb', image_rgb) # 原图
  13. # cv2.waitKey(0)
  14. # exit()
  15. image_gray = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2GRAY)
  16. # cv2.imshow('gray', image_gray) # 灰度图
  17. # cv2.waitKey(0)
  18. # exit()
  19. image_bulr = cv2.GaussianBlur(image_gray, ksize=(21, 21), sigmaX=0, sigmaY=0)
  20. cv2.imshow('image_blur', image_bulr) # 高斯虚化
  21. cv2.waitKey(0)
  22. exit()
  23. # divide: 提取两张差别较大的线条和内容
  24. image_blend = cv2.divide(image_gray, image_bulr, scale=255)
  25. # cv2.imshow('image_blend', image_blend) # 素描
  26. cv2.waitKey(0)
  27. # cv2.imwrite('result1.jpg', image_blend)
     上面的代码是在原有的基础上添加了一些实时展示的代码来理解,其实有人会问,我用软件不就可以直接生成素描图吗?那程序的好处是什么?程序的好处就是如果你的图片量多的话,这个时候使用程序批量生成也是非常方便高效的。
3. 百度图片爬虫+生成素描图
不过,这还不是我们的海量图片,为了达到海量这个词,写了一个百度图片爬虫,不过本文不是教如何写爬虫代码的,这里直接放出爬虫代码,符合软件工程规范:
  1. # Crawler.Spider.py
  2. import re
  3. import os
  4. import time
  5. import collections
  6. from collections import namedtuple
  7. import requests
  8. from concurrent import futures
  9. from tqdm import tqdm
  10. from enum import Enum
  11. BASE_URL = 'https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord={keyword}&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=&hd=&latest=©right=&word={keyword}&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&expermode=&force=&pn={page}&rn=30&gsm=&1568638554041='
  12. HEADERS = {
  13. 'Referer': 'http://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fr=&sf=1&fmq=1567133149621_R&pv=&ic=0&nc=1&z=0&hd=0&latest=0©right=0&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&sid=&word=%E5%A3%81%E7%BA%B8',
  14. 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
  15. 'X-Requested-With': 'XMLHttpRequest', }
  16. class BaiDuSpider:
  17. def __init__(self, max_works, images_type):
  18.   self.max_works = max_works
  19.   self.HTTPStatus = Enum('Status', ['ok', 'not_found', 'error'])
  20.   self.result = namedtuple('Result', 'status data')
  21.   self.session = requests.session()
  22.   self.img_type = images_type
  23.   self.img_num = None
  24.   self.headers = HEADERS
  25.   self.index = 1
  26. def get_img(self, img_url):
  27.   res = self.session.get(img_url)
  28.   if res.status_code != 200:
  29.    res.raise_for_status()
  30.   
  31.   return res.content
  32. def download_one(self, img_url, verbose):
  33.   try:
  34.    image = self.get_img(img_url)
  35.   except requests.exceptions.HTTPError as e:
  36.    res = e.response
  37.    if res.status_code == 404:
  38.     status = self.HTTPStatus.not_found
  39.     msg = 'not_found'
  40.    else:
  41.     raise
  42.   else:
  43.    self.save_img(self.img_type, image)
  44.    status = self.HTTPStatus.ok
  45.    msg = 'ok'
  46.   
  47.   if verbose:
  48.    print(img_url, msg)
  49.   
  50.   return self.result(status, msg)
  51. def get_img_url(self):
  52.   urls = [BASE_URL.format(keyword=self.img_type, page=page) for page in self.img_num]
  53.   for url in urls:
  54.    res = self.session.get(url, headers=self.headers)
  55.    if res.status_code == 200:
  56.     img_list = re.findall(r'"thumbURL":"(.*?)"', res.text)
  57.     # 返回出图片地址,配合其他函数运行
  58.     yield {img_url for img_url in img_list}
  59.    elif res.status_code == 404:
  60.     print('-----访问失败,找不到资源-----')
  61.     yield None
  62.    elif res.status_code == 403:
  63.     print('*****访问失败,服务器拒绝访问*****')
  64.     yield None
  65.    else:
  66.     print('>>> 网络连接失败 <<<')
  67.     yield None
  68. def download_many(self, img_url_set, verbose=False):
  69.   if img_url_set:
  70.    counter = collections.Counter()
  71.    with futures.ThreadPoolExecutor(self.max_works) as executor:
  72.     to_do_map = {}
  73.     for img in img_url_set:
  74.      future = executor.submit(self.download_one, img, verbose)
  75.      to_do_map[future] = img
  76.     done_iter = futures.as_completed(to_do_map)
  77.    
  78.    if not verbose:
  79.     done_iter = tqdm(done_iter, total=len(img_url_set))
  80.    for future in done_iter:
  81.     try:
  82.      res = future.result()
  83.     except requests.exceptions.HTTPError as e:
  84.      error_msg = 'HTTP error {res.status_code} - {res.reason}'
  85.      error_msg = error_msg.format(res=e.response)
  86.     except requests.exceptions.ConnectionError:
  87.      error_msg = 'ConnectionError error'
  88.     else:
  89.      error_msg = ''
  90.      status = res.status
  91.    
  92.     if error_msg:
  93.      status = self.HTTPStatus.error
  94.    
  95.     counter[status] += 1
  96.    
  97.     if verbose and error_msg:
  98.      img = to_do_map[future]
  99.      print('***Error for {} : {}'.format(img, error_msg))
  100.    return counter
  101.   else:
  102.    pass
  103. def save_img(self, img_type, image):
  104.   with open('{}/{}.jpg'.format(img_type, self.index), 'wb') as f:
  105.    f.write(image)
  106.   self.index += 1
  107. def what_want2download(self):
  108.   # self.img_type = input('请输入你想下载的图片类型,什么都可以哦~ >>> ')
  109.   try:
  110.    os.mkdir(self.img_type)
  111.   except FileExistsError:
  112.    pass
  113.   img_num = input('请输入要下载的数量(1位数代表30张,列如输入1就是下载30张,2就是60张):>>> ')
  114.   while True:
  115.    if img_num.isdigit():
  116.     img_num = int(img_num) * 30
  117.     self.img_num = range(30, img_num + 1, 30)
  118.     break
  119.    else:
  120.     img_num = input('输入错误,请重新输入要下载的数量>>> ')
  121. def main(self):
  122.   # 获取图片类型和下载的数量
  123.   total_counter = {}
  124.   self.what_want2download()
  125.   for img_url_set in self.get_img_url():
  126.    if img_url_set:
  127.     counter = self.download_many(img_url_set, False)
  128.     for key in counter:
  129.      if key in total_counter:
  130.       total_counter[key] += counter[key]
  131.      else:
  132.       total_counter[key] = counter[key]
  133.    
  134.    else:
  135.     # 可以为其添加报错功能
  136.     pass
  137.   
  138.   time.sleep(.5)
  139.   return total_counter
  140. if __name__ == '__main__':
  141. max_works = 20
  142. bd_spider = BaiDuSpider(max_works)
  143. print(bd_spider.main())
  1. # Sketch_the_generated_code.py
  2. import cv2
  3. def drawing(src, id=None):
  4. image_rgb = cv2.imread(src)
  5. image_gray = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2GRAY)
  6. image_blur = cv2.GaussianBlur(image_gray, ksize=(21, 21), sigmaX=0, sigmaY=0)
  7. image_blend = cv2.divide(image_gray, image_blur, scale=255)
  8. cv2.imwrite(f'Drawing_images/result-{id}.jpg', image_blend)
  1. # image_list.image_list_path.py
  2. import os
  3. from natsort import natsorted
  4. IMAGES_LIST = []
  5. def image_list(path):
  6. global IMAGES_LIST
  7. for root, dirs, files in os.walk(path):
  8.   # 按文件名排序
  9.   # files.sort()
  10.   files = natsorted(files)
  11.   # 遍历所有文件
  12.   for file in files:
  13.    # 如果后缀名为 .jpg
  14.    if os.path.splitext(file)[1] == '.jpg':
  15.     # 拼接成完整路径
  16.     # print(file)
  17.     filePath = os.path.join(root, file)
  18.     print(filePath)
  19.     # 添加到数组
  20.     IMAGES_LIST.append(filePath)
  21. return IMAGES_LIST
  1. # main.py
  2. import time
  3. from Sketch_the_generated_code import drawing
  4. from Crawler.Spider import BaiDuSpider
  5. from image_list.image_list_path import image_list
  6. import os
  7. MAX_WORDS = 20
  8. if __name__ == '__main__':
  9. # now_path = os.getcwd()
  10. # img_type = 'ai'
  11. img_type = input('请输入你想下载的图片类型,什么都可以哦~ >>> ')
  12. bd_spider = BaiDuSpider(MAX_WORDS, img_type)
  13. print(bd_spider.main())
  14. time.sleep(10) # 这里设置睡眠时间,让有足够的时间去添加,这样读取就,去掉或者太短会报错,所以
  15. for index, path in enumerate(image_list(img_type)):
  16.   drawing(src = path, id = index)
所以最终的目录结构如下所示:
4.jpg
您需要登录后才可以回帖 登录 | 立即注册

本版积分规则

QQ|手机版|小黑屋|paopaomj.COM ( 渝ICP备18007172号|渝公网安备50010502503914号 )

GMT+8, 2024-11-21 21:03

Powered by paopaomj X3.5 © 2016-2025 sitemap

快速回复 返回顶部 返回列表