rm unused files

This commit is contained in:
Lyon 2023-09-20 14:54:20 +08:00
parent 29bb2f08d9
commit 73a7071bde
48 changed files with 0 additions and 166 deletions

View File

@ -1,2 +0,0 @@
abcdefg
真好 好吃的不得了

View File

@ -1 +0,0 @@
20191512郭佳宝

View File

@ -1 +0,0 @@
20191928任祉涵

View File

@ -1,10 +0,0 @@
public class firstClass {
public static void main(String[] args) {
System.out.print("hello Lyon");
}
}

View File

@ -1 +0,0 @@
丁晨20192123网络编程实验课.txt

View File

@ -1 +0,0 @@
周琛祺-20192124-网络编程实验课

View File

@ -1 +0,0 @@
A desire to throw over reality a light that never was might give way abruptly to the desire on the part of what we might consider a novelist-scientist to record exactly and concretely the structure and texture of a flower.

View File

@ -1 +0,0 @@
孙世豪-20192220-网络编程实验

View File

@ -1 +0,0 @@
20191016 宋明绅

View File

@ -1,3 +0,0 @@
111
222
333

View File

@ -1,2 +0,0 @@
This is Zhang Xing's First Pr!
I did it!!!

View File

@ -1,2 +0,0 @@
35班张宇
学号20191623

View File

@ -1,102 +0,0 @@
# 导入selenium包
import os.path
import pickle
import re
from time import sleep
from selenium import webdriver
from bs4 import BeautifulSoup
import requests
# Reference counter intended for simple sequential naming of downloaded
# images (only referenced from commented-out code below).
index = 0
# Local proxy endpoint that every download request is routed through.
proxies = {'https':'http://127.0.0.1:10900'}
# Holds the URL of the next results page while paginating.
next_url = ''
# 定义爬虫方法
def geturl(url):
    """Crawl *url* with a Firefox WebDriver and download every post it links to.

    For each results page: open a fresh browser, download every post preview
    via huoqvpicture(), then follow the last paginator link to the next page
    and repeat.

    The original implementation recursed into itself once per page, which
    exhausts Python's recursion limit on a long crawl; this version iterates
    instead.  Side effects only (browser automation + downloads); returns None.

    Parameters:
        url: the results page to start crawling from.
    """
    while url:
        # One browser instance per page, matching the original behaviour.
        browser = webdriver.Firefox(executable_path=r"D:\GeckoDriver\geckodriver.exe")
        browser.get(url)
        # Anchor element of every post preview on this page.
        previews = browser.find_elements_by_css_selector(
            "div[class='post-preview-container'] a")
        for preview in previews:
            huoqvpicture(str(preview.get_attribute("href")))
            sleep(1)
        print("翻页")
        # The last link in the paginator points at the next results page.
        links = browser.find_elements_by_css_selector(
            "div[class='paginator numbered-paginator mt-8 mb-4 space-x-2 flex justify-center items-center'] a")
        url = str(links[-1].get_attribute("href"))
        browser.close()
def huoqvpicture(url):
    """Open a single post page and download its image via DownLoadPicture().

    Prefers the link inside the "resized" post notice (which points at the
    original-size file); when that notice is absent, falls back to the <img>
    displayed in the image container.  The post's data-id attribute is used
    as the saved file name.  Side effects only; returns None.

    Parameters:
        url: the post page to open.
    """
    browser = webdriver.Firefox(executable_path=r"D:\GeckoDriver\geckodriver.exe")
    browser.get(url)
    # First <section> under #content carries the post's data-id.
    n = browser.find_elements_by_css_selector("section[id='content'] section")
    try:
        s = browser.find_elements_by_css_selector(
            "div[class='notice notice-small post-notice post-notice-resized'] a")
        print(str(s[-1].get_attribute("href")))
        DownLoadPicture(str(s[-1].get_attribute("href")), str(n[0].get_attribute("data-id")))
    except IndexError:
        # Narrowed from a bare `except:` — the expected failure is s[-1] on an
        # empty result list when the post has no "resized" notice.  In that
        # case take the displayed image's src instead.
        p = browser.find_elements_by_css_selector(
            "section[class='image-container note-container blacklisted'] picture img")
        print(str(p[-1].get_attribute("src")))
        DownLoadPicture(str(p[-1].get_attribute("src")), str(n[0].get_attribute("data-id")))
    sleep(1)
    print(str(n[0].get_attribute("data-id")))
    browser.close()
def DownLoadPicture(url, name):
    """Download *url* into ./picture/<name>.jpg, retrying as <name>.png on error.

    The request is routed through the module-level *proxies* setting.  If the
    first (.jpg) attempt raises a network or filesystem error, the download is
    retried under the .png name — preserving the original try/except control
    flow.  Existing files are never overwritten.  Side effects only; returns
    None.

    Parameters:
        url:  direct link to the image file.
        name: base file name (the post's data-id) without extension.
    """
    root = "./picture/"

    def _fetch(path):
        # Shared download step (the original duplicated this whole body for
        # the .jpg and .png cases): ensure the target directory exists, skip
        # files already on disk, otherwise write the response body.
        os.makedirs(root, exist_ok=True)
        if os.path.exists(path):
            print("文件已存在")
            return
        sleep(1)
        r = requests.get(url, proxies=proxies)
        print(r.status_code)
        # `with` closes the file; the original also called f.close() inside
        # the with-block, which was redundant.
        with open(path, 'wb') as f:
            f.write(r.content)
        print("文件保存成功")

    try:
        _fetch(root + name + '.jpg')
    except (requests.RequestException, OSError):
        # Narrowed from a bare `except:` so that e.g. KeyboardInterrupt is no
        # longer swallowed; only download/IO failures trigger the .png retry.
        _fetch(root + name + '.png')
        sleep(1)
# Script entry point: start the crawl at the site's front page.
if __name__ == "__main__":
    start_url = "https://danbooru.donmai.us/"
    geturl(start_url)

View File

@ -1 +0,0 @@
很棒的项目!

View File

@ -1 +0,0 @@
杨公博20190626

View File

@ -1 +0,0 @@
死亦何哀

View File

@ -1,4 +0,0 @@
计算机网络编程综合实验
王薪陶-20190828-网络编程实验

View File

@ -1,2 +0,0 @@
祁延-20191009-网络编程实验
The world is my oyster.

View File

@ -1 +0,0 @@
大佬牛的

View File

@ -1 +0,0 @@
董淑泽-23190418-网络编程实验课

View File

@ -1 +0,0 @@
20190829蒋正豪网络编程实验课

View File

@ -1,2 +0,0 @@
网络编程综合实验4
38班20191112赵培森

View File

@ -1,2 +0,0 @@
网络编程实验4
2019级34班20170907郭佳琪

View File

@ -1 +0,0 @@
国庆快乐