Skip to content

Commit

Permalink
update 0.0.6.0
Browse files Browse the repository at this point in the history
  • Loading branch information
HibiKier committed Nov 4, 2021
1 parent 19d650c commit c802c44
Show file tree
Hide file tree
Showing 5 changed files with 50 additions and 14 deletions.
1 change: 1 addition & 0 deletions basic_plugins/help/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
help_image.unlink()
if simple_help_image.exists():
simple_help_image.unlink()
group_help_path.mkdir(exist_ok=True, parents=True)
for x in os.listdir(group_help_path):
group_help_image = group_help_path / x
group_help_image.unlink()
Expand Down
14 changes: 14 additions & 0 deletions plugins/pixiv_rank_search/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,13 +60,27 @@
"cmd": ["p站排行", "搜图", "p站搜图", "P站搜图"],
}
# Rate-limit notice shown when the command is re-triggered while a
# rank/search request is already in flight (message text is user-facing
# Chinese; do not translate at runtime).
__plugin_block_limit__ = {"rst": "P站排行榜或搜图正在搜索,请不要重复触发命令..."}
# Per-plugin configuration exposed to the bot's Config system.
# TIMEOUT: per-request image download timeout in seconds
# (read later via Config.get_config("pixiv_rank_search", "TIMEOUT")).
__plugin_configs__ = {
    "TIMEOUT": {
        "value": 10,
        "help": "图片下载超时限制",
        "default_value": 10
    }
}
# Register the hibiapi base URL under a separate "hibiapi" namespace so
# other plugins can share it; default is the public instance.
Config.add_plugin_config(
    "hibiapi",
    "HIBIAPI",
    "https://api.obfs.dev",
    help_="如果没有自建或其他hibiapi请不要修改",
    default_value="https://api.obfs.dev",
)
# Register the Pixiv reverse-proxy host under the shared "pixiv"
# namespace; used to rewrite i.pximg.net/i.pixiv.cat image URLs.
# NOTE(review): no default_value is passed here, unlike HIBIAPI above —
# presumably intentional, but verify against Config.add_plugin_config.
Config.add_plugin_config(
    "pixiv",
    "PIXIV_NGINX_URL",
    "i.pixiv.re",
    help_="Pixiv反向代理"
)


rank_dict = {
"1": "day",
Expand Down
37 changes: 25 additions & 12 deletions plugins/pixiv_rank_search/data_source.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,9 +37,9 @@ async def get_pixiv_urls(
if date:
params["date"] = date
hibiapi = Config.get_config("hibiapi", "HIBIAPI")
hibiapi = hibiapi[:-1] if hibiapi[-1] == '/' else hibiapi
hibiapi = hibiapi[:-1] if hibiapi[-1] == "/" else hibiapi
rank_url = f"{hibiapi}/api/pixiv/rank"
return await parser_data(rank_url, num, params, 'rank')
return await parser_data(rank_url, num, params, "rank")


async def search_pixiv_urls(
Expand All @@ -52,14 +52,16 @@ async def search_pixiv_urls(
:param page: 页数
:param r18: 是否r18
"""
params = {"word": keyword, 'page': page}
params = {"word": keyword, "page": page}
hibiapi = Config.get_config("hibiapi", "HIBIAPI")
hibiapi = hibiapi[:-1] if hibiapi[-1] == '/' else hibiapi
hibiapi = hibiapi[:-1] if hibiapi[-1] == "/" else hibiapi
search_url = f"{hibiapi}/api/pixiv/search"
return await parser_data(search_url, num, params, 'search', r18)
return await parser_data(search_url, num, params, "search", r18)


async def parser_data(url: str, num: int, params: dict, type_: str, r18: int = 0) -> "list, int":
async def parser_data(
url: str, num: int, params: dict, type_: str, r18: int = 0
) -> "list, int":
"""
解析数据
:param url: hibiapi搜索url
Expand All @@ -73,7 +75,10 @@ async def parser_data(url: str, num: int, params: dict, type_: str, r18: int = 0
for _ in range(3):
try:
async with session.get(
url, params=params, proxy=get_local_proxy(), timeout=5
url,
params=params,
proxy=get_local_proxy(),
timeout=Config.get_config("pixiv_rank_search", "TIMEOUT"),
) as response:
if response.status == 200:
data = await response.json()
Expand All @@ -87,8 +92,8 @@ async def parser_data(url: str, num: int, params: dict, type_: str, r18: int = 0
num = num if num < 30 else 30
data = data[:num]
for x in data:
if type_ == 'search' and r18 == 1:
if 'R-18' in str(x['tags']):
if type_ == "search" and r18 == 1:
if "R-18" in str(x["tags"]):
continue
title = x["title"]
author = x["user"]["name"]
Expand All @@ -114,12 +119,21 @@ async def download_pixiv_imgs(
result = ""
index = 0
for url in urls:
url = url.replace('_webp', '')
ws_url = Config.get_config("pixiv", "PIXIV_NGINX_URL")
if ws_url.startswith("http"):
ws_url = ws_url.split("//")[-1]
url = (
url.replace("i.pximg.net", ws_url)
.replace("i.pixiv.cat", ws_url)
.replace("_webp", "")
)
async with aiohttp.ClientSession(headers=headers) as session:
for _ in range(3):
try:
async with session.get(
url, proxy=get_local_proxy(), timeout=3
url,
proxy=get_local_proxy(),
timeout=Config.get_config("pixiv_rank_search", "TIMEOUT"),
) as response:
if response.status == 200:
try:
Expand Down Expand Up @@ -158,4 +172,3 @@ async def download_pixiv_imgs(
else:
result += "\n这张图下载失败了..\n"
return result

4 changes: 3 additions & 1 deletion plugins/send_setu_/send_setu/data_source.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,9 @@ async def search_online_setu(
"""
ws_url = Config.get_config("pixiv", "PIXIV_NGINX_URL")
if ws_url:
url_ = url_.replace("i.pixiv.cat", ws_url)
if ws_url.startswith("http"):
ws_url = ws_url.split("//")[-1]
url_ = url_.replace("i.pximg.net", ws_url).replace("i.pixiv.cat", ws_url)
async with aiohttp.ClientSession(headers=headers) as session:
for i in range(3):
logger.info(f"search_online_setu --> {i}")
Expand Down
8 changes: 7 additions & 1 deletion plugins/send_setu_/update_setu/data_source.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
from ..model import Setu
from aiohttp.client_exceptions import ClientConnectorError
from asyncpg.exceptions import UniqueViolationError
from configs.config import Config
from pathlib import Path
from nonebot import Driver
import nonebot
Expand Down Expand Up @@ -105,10 +106,15 @@ async def update_setu_img():
path.mkdir(exist_ok=True, parents=True)
rar_path.mkdir(exist_ok=True, parents=True)
if not local_image.exists() or not image.img_hash:
url_ = image.img_url
ws_url = Config.get_config("pixiv", "PIXIV_NGINX_URL")
if ws_url.startswith("http"):
ws_url = ws_url.split("//")[-1]
url_ = url_.replace("i.pximg.net", ws_url).replace("i.pixiv.cat", ws_url)
for _ in range(3):
try:
async with session.get(
image.img_url, proxy=get_local_proxy(), timeout=30
url_, proxy=get_local_proxy(), timeout=30
) as response:
if response.status == 200:
async with aiofiles.open(
Expand Down

0 comments on commit c802c44

Please sign in to comment.