Compare commits

...

40 Commits

Author SHA1 Message Date
Christoph Califice  7548e39f1e  disable slskd  2026-04-12 12:46:02 -03:00
Christoph Califice  5eaddc413a  add fetcharr  2026-03-13 17:59:34 -03:00
Christoph Califice  baa1936a14  remove gluetun  2026-03-11 14:50:04 -03:00
Christoph Califice  4d3554c734  skip qbtittorrent stack on StopAllContainers  2026-02-24 16:50:14 -03:00
Christoph Califice  a9e5e1b99d  change qbittorrent data mount  2026-02-24 12:46:07 -03:00
Christoph Califice  4e539d6972  change cleanuparr data mount  2026-02-24 12:45:26 -03:00
Christoph Califice  fe074065dc  remove huntarr  2026-02-23 22:50:42 -03:00
Christoph Califice  9024195549  latest gluetun  2026-02-22 12:04:31 -03:00
Christoph Califice  b832453177  dependency condition  2026-02-22 11:09:55 -03:00
Christoph Califice  802876a71b  gluetun version  2026-02-22 11:07:01 -03:00
Christoph Califice  900afa850e  set gluetun to v3.41  2026-02-21 20:06:25 -03:00
Christoph Califice  7dded38f55  qbittorrent depends on  2026-02-21 19:51:23 -03:00
Christoph Califice  7c8f09fa14  gluetun restart policy  2026-02-21 19:46:22 -03:00
Christoph Califice  5505d78933  remove whisparr  2026-02-21 19:33:28 -03:00
Christoph Califice  aea05da9a7  remove wud  2026-02-21 18:51:32 -03:00
Christoph Califice  d25c740292  gluetun tz  2026-02-21 17:32:44 -03:00
Christoph Califice  a2d08bf7c9  gluetun  2026-02-20 20:21:48 -03:00
Christoph Califice  8302064414  update to seer  2026-02-19 19:10:29 -03:00
Christoph Califice  b1cacfd703  remove gdluxx  2026-02-14 12:35:38 -03:00
Christoph Califice  b3d2846723  add gdluxx  2026-02-14 00:17:25 -03:00
Christoph Califice  ef5a822b2a  add jackett  2026-02-01 17:44:42 -03:00
Christoph Califice  629762d9f5  update qb port  2026-02-01 17:07:10 -03:00
Christoph Califice  61e3193cb4  add whisparr and qb to stash  2026-02-01 17:05:21 -03:00
Christoph Califice  15bee7e1c3  add newt network  2026-01-25 15:44:09 -03:00
Christoph Califice  186e4af72f  add newt  2026-01-25 15:36:19 -03:00
Christoph Califice  f7e5ede910  update cleanuparr path  2026-01-25 11:01:05 -03:00
Christoph Califice  627a0b9f5d  add gallery-dl to stash  2026-01-18 15:27:13 -03:00
Christoph Califice  e477b16b0b  rever uid changes  2026-01-18 00:33:04 -03:00
Christoph Califice  242bf27265  comment sonarr PUID and GUID  2026-01-18 00:16:24 -03:00
Christoph Califice  422227267d  change sonarr PUID  2026-01-18 00:10:03 -03:00
Christoph Califice  129386e51e  add cleanuparr  2026-01-18 00:09:03 -03:00
Christoph Califice  98defc07cc  remove ubooquity  2025-12-08 19:21:44 -03:00
Christoph Califice  ac0c464900  ubooquity path  2025-12-08 19:13:31 -03:00
Christoph Califice  905af397d6  test ubooquity  2025-12-07 22:52:53 -03:00
Christoph Califice  0d89b33069  read only kavita  2025-12-07 22:25:26 -03:00
Christoph Califice  385c87e537  test webhook  2025-12-07 12:42:39 -03:00
Christoph Califice  b0b876869d  separate configs from compose files  2025-12-07 12:30:14 -03:00
Christoph Califice  295bcba288  move stash generated location  2025-10-11 17:47:34 -03:00
Christoph Califice  c9d4480e96  conf  2025-10-10 09:55:44 -03:00
Christoph Califice  d1a171ce66  remove stash-box  2025-10-10 09:53:22 -03:00
1461 changed files with 285 additions and 102444 deletions

.gitignore (vendored)

@@ -1,14 +0,0 @@
*
!*/
!.gitignore
!*/docker-compose.y*ml
!*/.*env*
!qbittorrent/config/qBittorrent/*.json
!stash/cron/**/*
!stash/config/config.yml
!stash/stash-box-config/*
!stash/config/plugins/**/*
qbittorrent/config/qBittorrent/qBittorrent-data.conf


@@ -1,17 +1,14 @@
services:
glances:
#image: nicolargo/glances:3.4.0.5-full
image: nicolargo/glances:latest-full
container_name: glances-arr
image: nicolargo/glances:latest-full
restart: always
pid: host
ports:
- 61208:61208
- '61208:61208'
volumes:
- /var/run/docker.sock:/var/run/docker.sock
# Uncomment the below line if you want glances to display host OS detail instead of container's
- /etc/os-release:/etc/os-release:ro
- /mnt/hdd_1tb:/mnt/hdd_1tb
- /:/host
- /:/host:ro
environment:
- "GLANCES_OPT=-w"
- GLANCES_OPT=-w
pid: host


@@ -1,15 +1,12 @@
services:
kavita:
image: jvmilazz0/kavita:latest # Using the stable branch from the official dockerhub repo.
container_name: kavita
volumes:
- /mnt/system/Etc/NotPorn:/images # Manga is just an example you can have the name you want. See the following
# - ./comics:/comics # Use as many as you want
# - ./books:/books #
- ./data:/kavita/config # Change './data if you want to have the config files in a different place.
# /kavita/config must not be changed
environment:
- TZ=America/Sao_Paulo
ports:
- "5000:5000" # Change the public port (the first 5000) if you have conflicts with other services
restart: unless-stopped
kavita:
container_name: kavita
image: jvmilazz0/kavita:latest
restart: unless-stopped
ports:
- '5000:5000'
volumes:
- /mnt/system/f95:/images:ro
- /home/ccalifice/docker-data/kavita/config:/kavita/config
environment:
- TZ=America/Sao_Paulo


@@ -1,9 +1,10 @@
services:
## Deploy Periphery container using this block,
## or deploy the Periphery binary with systemd using
## or deploy the Periphery binary with systemd using
## https://github.com/moghtech/komodo/tree/main/scripts
periphery:
image: ghcr.io/moghtech/komodo-periphery:${COMPOSE_KOMODO_IMAGE_TAG:-latest}
container_name: komodo-periphery
labels:
komodo.skip: # Prevent Komodo from stopping with StopAllContainers
restart: unless-stopped

newt/docker-compose.yml (new file)

@@ -0,0 +1,17 @@
services:
newt:
image: fosrl/newt
container_name: newt
restart: unless-stopped
environment:
- PANGOLIN_ENDPOINT=https://pangolin.sereiaguardia.com
- NEWT_ID=b35t59vydac8cxd
- NEWT_SECRET=qwpz25jmr7a8zsvmjw8to9sewr9ns7bxzlpuy5m0i2jeoujp
networks:
- newt
networks:
newt:
name: newt
driver: bridge


@@ -1,23 +0,0 @@
{
"books": {
"save_path": ""
},
"games": {
"save_path": ""
},
"movies": {
"save_path": ""
},
"music": {
"save_path": ""
},
"radarr": {
"save_path": ""
},
"tv": {
"save_path": ""
},
"xxx": {
"save_path": ""
}
}


@@ -1,2 +0,0 @@
[Stats]
AllStats=@Variant(\0\0\0\x1c\0\0\0\x2\0\0\0\x12\0\x41\0l\0l\0t\0i\0m\0\x65\0\x44\0L\0\0\0\x4\0\0\tU\x2Z\x98Y\0\0\0\x12\0\x41\0l\0l\0t\0i\0m\0\x65\0U\0L\0\0\0\x4\0\0RV\x83/\x80\x65)


@@ -1,113 +0,0 @@
[Application]
FileLogger\Age=1
FileLogger\AgeType=1
FileLogger\Backup=true
FileLogger\DeleteOld=true
FileLogger\Enabled=true
FileLogger\MaxSizeBytes=66560
FileLogger\Path=/config/qBittorrent/logs
[AutoRun]
OnTorrentAdded\Enabled=false
OnTorrentAdded\Program=
enabled=false
program=
[BitTorrent]
Session\AddTorrentStopped=false
Session\AlternativeGlobalDLSpeedLimit=10000
Session\AlternativeGlobalUPSpeedLimit=0
Session\BandwidthSchedulerEnabled=true
Session\DHTEnabled=true
Session\DefaultSavePath=/data/torrents
Session\DisableAutoTMMByDefault=false
Session\ExcludedFileNames=
Session\GlobalDLSpeedLimit=0
Session\IgnoreSlowTorrentsForQueueing=true
Session\LSDEnabled=true
Session\MaxActiveCheckingTorrents=2
Session\MaxActiveDownloads=10
Session\MaxActiveTorrents=100
Session\MaxActiveUploads=20
Session\MaxConnections=-1
Session\MaxUploads=-1
Session\MaxUploadsPerTorrent=-1
Session\PeXEnabled=true
Session\Port=40848
Session\QueueingSystemEnabled=true
Session\SSL\Port=55040
Session\ShareLimitAction=Stop
Session\Tags=LastFiles, SceneTime
Session\TempPath=/downloads/incomplete/
Session\UseAlternativeGlobalSpeedLimit=true
[Core]
AutoDeleteAddedTorrentFile=Never
[LegalNotice]
Accepted=true
[Meta]
MigrationVersion=8
[Network]
PortForwardingEnabled=false
Proxy\HostnameLookupEnabled=false
Proxy\Profiles\BitTorrent=true
Proxy\Profiles\Misc=true
Proxy\Profiles\RSS=true
[Preferences]
Advanced\RecheckOnCompletion=false
Advanced\trackerPort=9000
Advanced\trackerPortForwarding=false
Connection\PortRangeMin=6881
Connection\ResolvePeerCountries=true
Connection\UPnP=false
Downloads\SavePath=/downloads/
Downloads\TempPath=/downloads/incomplete/
DynDNS\DomainName=aaaaaa.dyndns.org
DynDNS\Enabled=false
DynDNS\Password=
DynDNS\Service=DynDNS
DynDNS\Username=
General\Locale=en
MailNotification\email=
MailNotification\enabled=false
MailNotification\password=
MailNotification\req_auth=true
MailNotification\req_ssl=false
MailNotification\sender=qBittorrent_notification@example.com
MailNotification\smtp_server=smtp.changeme.com
MailNotification\username=
Scheduler\end_time=@Variant(\0\0\0\xf\x5%q\xa0)
Scheduler\start_time=@Variant(\0\0\0\xf\x1\xee\x62\x80)
WebUI\Address=*
WebUI\AlternativeUIEnabled=false
WebUI\AuthSubnetWhitelist=192.168.1.0/24
WebUI\AuthSubnetWhitelistEnabled=true
WebUI\BanDuration=3600
WebUI\CSRFProtection=true
WebUI\ClickjackingProtection=true
WebUI\CustomHTTPHeaders=
WebUI\CustomHTTPHeadersEnabled=false
WebUI\HTTPS\CertificatePath=
WebUI\HTTPS\Enabled=false
WebUI\HTTPS\KeyPath=
WebUI\HostHeaderValidation=true
WebUI\LocalHostAuth=true
WebUI\MaxAuthenticationFailCount=10
WebUI\Password_PBKDF2="@ByteArray(OUcpojCntLZHBzdhkSxH9A==:6SbAK9PPyaEU4We0xKGyOEG9ktK0c1zdZszzsvi5jMtrJR0WuwZut60cV8IEDukvNXofPjWrGCv12kQCCKS/JA==)"
WebUI\Port=8181
WebUI\ReverseProxySupportEnabled=true
WebUI\RootFolder=
WebUI\SecureCookie=true
WebUI\ServerDomains=*
WebUI\SessionTimeout=3600
WebUI\TrustedReverseProxiesList=qbittorrent.ccalifice.com
WebUI\UseUPnP=false
WebUI\Username=admin
[RSS]
AutoDownloader\DownloadRepacks=true
AutoDownloader\SmartEpisodeFilter=s(\\d+)e(\\d+), (\\d+)x(\\d+), "(\\d{4}[.\\-]\\d{1,2}[.\\-]\\d{1,2})", "(\\d{1,2}[.\\-]\\d{1,2}[.\\-]\\d{4})"


@@ -1,2 +0,0 @@
{
}


@@ -8,38 +8,41 @@ services:
- TZ=America/Sao_Paulo
- WEBUI_PORT=8181
volumes:
- ./config:/config
- /mnt/servarr/data/torrents:/data/torrents
- /home/ccalifice/docker-data/qbittorrent/config:/config
- /mnt/servarr/data:/data
ports:
- "8181:8181"
- "40848:40848"
- "40848:40848/udp"
- "8181:8181"
- "40848:40848"
- "40848:40848/udp"
restart: unless-stopped
# network_mode: "service:gluetun"
# depends_on:
# gluetun:
# condition: service_started
labels:
komodo.skip:
# qbit_manage:
# container_name: qbit_manage
# image: ghcr.io/stuffanthings/qbit_manage:latest
# volumes:
# - ./config:/config:rw
# - /mnt/user/data/torrents/:/data/torrents:rw
# - /mnt/hdd_1tb/data/torrents:/qbittorrent/:ro
# environment:
# - QBT_RUN=false
# - QBT_SCHEDULE=1440
# - QBT_CONFIG=config.yml
# - QBT_LOGFILE=activity.log
# - QBT_CROSS_SEED=false
# - QBT_RECHECK=false
# - QBT_CAT_UPDATE=false
# - QBT_TAG_UPDATE=false
# - QBT_REM_UNREGISTERED=false
# - QBT_REM_ORPHANED=false
# - QBT_TAG_TRACKER_ERROR=false
# - QBT_TAG_NOHARDLINKS=false
# - QBT_SHARE_LIMITS=false
# - QBT_SKIP_CLEANUP=false
# - QBT_DRY_RUN=false
# - QBT_LOG_LEVEL=INFO
# - QBT_DIVIDER==
# - QBT_WIDTH=100
# restart: on-failure:2
# gluetun:
# image: qmcgaw/gluetun:latest
# container_name: gluetun_qbittorrent
# cap_add:
# - NET_ADMIN
# - NET_RAW
# environment:
# - VPN_SERVICE_PROVIDER=custom
# - VPN_TYPE=wireguard
# - WIREGUARD_ENDPOINT_IP=85.209.92.53
# - WIREGUARD_ENDPOINT_PORT=51822
# - WIREGUARD_PUBLIC_KEY=/rw71EjmWXGLftrUp/AgZ9RgKYc/5wh//yFtluq4L2A=
# - WIREGUARD_PRIVATE_KEY=wEePWTLpMDYd0Fuqzef40q8ggyActNdt7veVWOaxiV8=
# - WIREGUARD_PRESHARED_KEY=0b+TB4RdPWMhcu44/sOd2lJrHNIAheddnE54OqOyrZQ=
# - WIREGUARD_ADDRESSES=10.8.0.4/32
# - TZ=America/Sao_Paulo
# ports:
# - 8181:8181
# - 40848:40848
# - 40848:40848/udp
# restart: unless-stopped
# labels:
# komodo.skip:


@@ -1,4 +1,3 @@
---
services:
sonarr:
image: lscr.io/linuxserver/sonarr:latest
@@ -8,7 +7,7 @@ services:
- PGID=1002
- TZ=America/Sao_Paulo
volumes:
- ./sonarr:/config
- /home/ccalifice/docker-data/servarr/sonarr:/config
- /mnt/servarr/data:/data
ports:
- "8989:8989"
@@ -22,42 +21,163 @@ services:
- PGID=1002
- TZ=America/Sao_Paulo
volumes:
- ./radarr:/config
- /home/ccalifice/docker-data/servarr/radarr:/config
- /mnt/servarr/data:/data
ports:
- "7878:7878"
restart: unless-stopped
slskd:
image: slskd/slskd
container_name: slskd
ports:
- "5030:5030"
- "5031:5031"
- "50300:50300"
environment:
- SLSKD_REMOTE_CONFIGURATION=true
- SLSKD_SLSK_USERNAME=ticonouvido
- SLSKD_SLSK_PASSWORD=asdf1234
volumes:
- ./slskd:/app
- /mnt/servarr/data/media/music:/music
restart: unless-stopped
# slskd:
# image: slskd/slskd
# container_name: slskd
# ports:
# - "5030:5030"
# - "5031:5031"
# - "50300:50300"
# environment:
# - SLSKD_REMOTE_CONFIGURATION=true
# - SLSKD_SLSK_USERNAME=ticonouvido
# - SLSKD_SLSK_PASSWORD=asdf1234
# volumes:
# - /home/ccalifice/docker-data/servarr/slskd:/app
# - /mnt/servarr/data/torrents/music:/music
# - /mnt/servarr/data/media/music:/music-organized
# restart: unless-stopped
lidarr:
image: lscr.io/linuxserver/lidarr:latest
container_name: lidarr
prowlarr:
image: lscr.io/linuxserver/prowlarr:develop
container_name: prowlarr
environment:
- PUID=1000
- PGID=1002
- TZ=America/Sao_Paulo
volumes:
- ./lidarr:/config
- /mnt/servarr/data/:/data #optional
- /home/ccalifice/docker-data/servarr/prowlarr:/config
ports:
- 8686:8686
- "9696:9696"
restart: unless-stopped
bazarr:
image: lscr.io/linuxserver/bazarr:latest
container_name: bazarr
environment:
- PUID=1000
- PGID=1002
- TZ=America/Sao_Paulo
volumes:
- /home/ccalifice/docker-data/servarr/bazarr:/config
- /mnt/servarr/data/media/movies:/movies
- /mnt/servarr/data/media/tv:/tv
ports:
- "6767:6767"
restart: unless-stopped
seerr:
image: ghcr.io/seerr-team/seerr:latest
init: true
container_name: seerr
user: "1000:1002"
environment:
- LOG_LEVEL=info
- TZ=America/Sao_Paulo
- PORT=5055 #optional
ports:
- 5055:5055
volumes:
- /home/ccalifice/docker-data/servarr/seer:/app/config
healthcheck:
test: wget --no-verbose --tries=1 --spider http://localhost:5055/api/v1/status || exit 1
start_period: 20s
timeout: 3s
interval: 15s
retries: 3
restart: unless-stopped
networks:
- seer
flaresolverr:
image: ghcr.io/flaresolverr/flaresolverr:latest
container_name: flaresolverr
environment:
- LOG_LEVEL=${LOG_LEVEL:-info}
- LOG_HTML=${LOG_HTML:-false}
- CAPTCHA_SOLVER=${CAPTCHA_SOLVER:-none}
- TZ=America/Sao_Paulo
ports:
- "${PORT:-8191}:8191"
restart: unless-stopped
cleanuparr:
image: ghcr.io/cleanuparr/cleanuparr:latest
container_name: cleanuparr
restart: unless-stopped
ports:
- "11011:11011"
volumes:
- /home/ccalifice/docker-data/cleanuparr:/config
# Mount your downloads directory if needed
- /mnt/servarr/data:/data
environment:
- PORT=11011
- BASE_PATH=
- PUID=1000
- PGID=1002
- UMASK=022
- TZ=America/Sao_Paulo
# Health check configuration
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:11011/health"]
interval: 30s # Check every 30 seconds
timeout: 10s # Allow up to 10 seconds for response
start_period: 30s # Wait 30 seconds before first check
retries: 3 # Mark unhealthy after 3 consecutive failures
#recovery-key: light-tower-river-95
fetcharr:
image: egg82/fetcharr:latest
environment:
- VERIFY_CERTS=false
- SSL_PATH=/etc/ssl/certs/ca-bundle.crt
- SEARCH_AMOUNT=5
- SEARCH_INTERVAL=1hour
- RADARR_0_URL=http://192.168.1.201:7878
- RADARR_0_API_KEY=87d8baf79d4f4610954db015e54456f0
- SONARR_0_URL=http://192.168.1.201:8989
- SONARR_0_API_KEY=b48e381cf57548c8b3bb286956940bb0
# jackett:
# image: lscr.io/linuxserver/jackett:latest
# container_name: jackett
# environment:
# - PUID=1000
# - PGID=1002
# - TZ=America/Sao_Paulo
# - AUTO_UPDATE=true #optional
# - RUN_OPTS= #optional
# volumes:
# - ./jackett:/config
# - ./blackhole:/downloads
# ports:
# - 9117:9117
# restart: unless-stopped
# lidarr:
# image: ghcr.io/hotio/lidarr:pr-plugins
# container_name: lidarr
# environment:
# - PUID=1000
# - PGID=1002
# - TZ=America/Sao_Paulo
# volumes:
# - /home/ccalifice/docker-data/servarr/lidarr:/config
# - /mnt/servarr/data/:/data #optional
# ports:
# - 8686:8686
# restart: unless-stopped
# soularr:
# restart: unless-stopped
@@ -78,78 +198,5 @@ services:
# - ./soularr:/data
# image: mrusse08/soularr:latest
prowlarr:
image: lscr.io/linuxserver/prowlarr:develop
container_name: prowlarr
environment:
- PUID=1000
- PGID=1002
- TZ=America/Sao_Paulo
volumes:
- ./prowlarr:/config
ports:
- "9696:9696"
restart: unless-stopped
bazarr:
image: lscr.io/linuxserver/bazarr:latest
container_name: bazarr
environment:
- PUID=1000
- PGID=1002
- TZ=America/Sao_Paulo
volumes:
- ./bazarr:/config
- /mnt/servarr/data/media/movies:/movies
- /mnt/servarr/data/media/tv:/tv
ports:
- "6767:6767"
restart: unless-stopped
jellyseerr:
image: fallenbagel/jellyseerr:latest
container_name: jellyseerr
environment:
- LOG_LEVEL=info
- TZ=America/Sao_Paulo
ports:
- 5055:5055
volumes:
- ./jellyseer:/app/config
restart: unless-stopped
networks:
- pangolin
flaresolverr:
# DockerHub mirror flaresolverr/flaresolverr:latest
image: ghcr.io/flaresolverr/flaresolverr:latest
container_name: flaresolverr
environment:
- LOG_LEVEL=${LOG_LEVEL:-info}
- LOG_HTML=${LOG_HTML:-false}
- CAPTCHA_SOLVER=${CAPTCHA_SOLVER:-none}
- TZ=America/Sao_Paulo
ports:
- "${PORT:-8191}:8191"
restart: unless-stopped
jackett:
image: lscr.io/linuxserver/jackett:latest
container_name: jackett
environment:
- PUID=1000
- PGID=1002
- TZ=America/Sao_Paulo
- AUTO_UPDATE=true #optional
- RUN_OPTS= #optional
volumes:
- ./jackett:/config
- ./blackhole:/downloads
ports:
- 9117:9117
restart: unless-stopped
networks:
pangolin:
external: true
name: pangolin
seer:


@@ -1,245 +0,0 @@
api_key: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1aWQiOiJjY2FsaWZpY2UiLCJzdWIiOiJBUElLZXkiLCJpYXQiOjE3NDE5NTY3OTN9.UWTnuuWhw0mFNQVWcXCer-NA8IyipxoGx0VieHW7XRQ
blobs_path: /root/.stash/blobs
blobs_storage: FILESYSTEM
calculate_md5: false
create_galleries_from_folders: true
create_image_clip_from_videos: false
dangerous_allow_public_without_auth: "false"
database: /root/.stash/stash-go.sqlite
defaults:
auto_tag_task:
paths: []
performers:
- '*'
studios:
- '*'
tags:
- '*'
generate_task:
clippreviews: false
covers: true
forcetranscodes: false
imagepreviews: false
imagethumbnails: true
interactiveheatmapsspeeds: false
markerids: []
markerimagepreviews: false
markers: false
markerscreenshots: false
overwrite: false
phashes: true
previewoptions:
previewexcludeend: "0"
previewexcludestart: "0"
previewpreset: slow
previewsegmentduration: 0.75
previewsegments: 12
previews: false
sceneids: []
sprites: false
transcodes: false
identify_task:
sources:
- source:
stashboxindex: null
stashboxendpoint: null
scraperid: builtin_autotag
options:
fieldoptions: []
setcoverimage: null
setorganized: false
includemaleperformers: null
skipmultiplematches: true
skipmultiplematchtag: null
skipsinglenameperformers: true
skipsinglenameperformertag: null
- source:
stashboxindex: null
stashboxendpoint: https://stashdb.org/graphql
scraperid: null
options: null
options:
fieldoptions:
- field: title
strategy: IGNORE
createmissing: false
- field: studio
strategy: MERGE
createmissing: true
- field: performers
strategy: MERGE
createmissing: true
- field: tags
strategy: MERGE
createmissing: true
setcoverimage: false
setorganized: false
includemaleperformers: true
skipmultiplematches: true
skipmultiplematchtag: null
skipsinglenameperformers: true
skipsinglenameperformertag: null
sceneids: []
paths: []
scan_task:
scangenerateclippreviews: true
scangeneratecovers: true
scangenerateimagepreviews: false
scangeneratephashes: false
scangeneratepreviews: true
scangeneratesprites: false
scangeneratethumbnails: true
dlna:
default_whitelist:
- 192.168.1.80
exclude: []
gallery_cover_regex: (poster|cover|folder|board)\.[^\.]+$
generated: ""
host: 0.0.0.0
image_exclude:
- .*/f95/.*
jwt_secret_key: 465db67dd7d0cbf0848283511375283970db6300c7c25ff57d5fcdc901116322
menu_items:
- scenes
- images
- groups
- markers
- galleries
- performers
- studios
- tags
no_proxy: localhost,127.0.0.1,192.168.0.0/16,10.0.0.0/8,172.16.0.0/12
nobrowser: false
notifications_enabled: false
parallel_tasks: 0
password: $2a$04$EcHNpS7.7GTpKNshc.QNQu53mbnVGomC5q9.YCkVPhVWBPquLIdoa
plugins:
package_sources:
- localpath: community
name: Community (stable)
url: https://stashapp.github.io/CommunityScripts/stable/index.yml
settings:
DupFileManager:
matchDupDistance: 1
zzObsoleteSettingsCheckVer2: true
plugins_path: /root/.stash/plugins
port: 9999
preview_audio: true
preview_exclude_end: "0"
preview_exclude_start: "0"
preview_preset: medium
preview_segment_duration: 0.75
preview_segments: 12
scrapers:
package_sources:
- localpath: community
name: Community (stable)
url: https://stashapp.github.io/CommunityScrapers/stable/index.yml
scrapers_path: /root/.stash/scrapers
security_tripwire_accessed_from_public_internet: ""
sequential_scanning: false
session_store_key: 5f2ae4ea161237c076aeb33c1f9a0e3f13a4d8092b3f76a669fe2e91e02f1d36
show_one_time_moved_notification: false
sound_on_preview: false
stash:
- excludeimage: false
excludevideo: false
path: /data/
stash_boxes:
- endpoint: https://stashdb.org/graphql
apikey: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1aWQiOiJhMmU5NTc3Ny0zOGEwLTQwMWYtYWMxNC01MjBkZmFjOWJhOTMiLCJzdWIiOiJBUElLZXkiLCJpYXQiOjE3NTk3NjAxMjV9.gOzykd_FyhgUNiircH9gE7S3nvLQrNMQ_26yzupVMkY
name: stash-box
theme_color: '#202b33'
ui:
advancedMode: true
frontPageContent:
- __typename: CustomFilter
direction: DESC
message:
id: recently_released_objects
values:
objects: Scenes
mode: SCENES
sortBy: date
- __typename: CustomFilter
direction: DESC
message:
id: recently_added_objects
values:
objects: Studios
mode: STUDIOS
sortBy: created_at
- __typename: CustomFilter
direction: DESC
message:
id: recently_released_objects
values:
objects: Movies
mode: MOVIES
sortBy: date
- __typename: CustomFilter
direction: DESC
message:
id: recently_added_objects
values:
objects: Performers
mode: PERFORMERS
sortBy: created_at
- __typename: CustomFilter
direction: DESC
message:
id: recently_released_objects
values:
objects: Galleries
mode: GALLERIES
sortBy: date
lastNoteSeen: "20240826"
pinnedFilters:
scenes:
- performers
- tags
taskDefaults:
autoTag:
performers:
- '*'
studios: []
tags:
- '*'
cleanGenerated:
blobFiles: true
dryRun: false
imageThumbnails: true
markers: true
screenshots: true
sprites: true
transcodes: true
generate:
clipPreviews: false
covers: true
imagePreviews: false
imageThumbnails: true
interactiveHeatmapsSpeeds: false
markerImagePreviews: false
markerScreenshots: false
markers: false
phashes: true
previewOptions:
previewExcludeEnd: "0"
previewExcludeStart: "0"
previewPreset: medium
previewSegmentDuration: 0.75
previewSegments: 12
previews: false
sprites: false
transcodes: false
scan:
scanGenerateClipPreviews: true
scanGenerateCovers: true
scanGenerateImagePreviews: false
scanGeneratePhashes: true
scanGeneratePreviews: true
scanGenerateSprites: false
scanGenerateThumbnails: true
username: ccalifice
video_file_naming_algorithm: OSHASH
write_image_thumbnails: true


@@ -1,6 +0,0 @@
name: CommunityScriptsUILibrary
description: CommunityScripts UI helper library
version: 1.0.0
ui:
javascript:
- cs-ui-lib.js


@@ -1,90 +0,0 @@
# CommunityScripts UI Plugin library
This is a set of slimmed-down functions from StashUserscriptLibrary targeting CommunityScripts, originally introduced in [#319](https://github.com/stashapp/CommunityScripts/pull/319)
## functions
All the following functions are exposed under `window.csLib` and `csLib`
## callGQL
```js
/**
* This is a wrapped GraphQL (GQL) query caller
* @param {...Object} reqData
* @param {Object} reqData.query - GraphQL query
* @param {Object}= reqData.variables - GraphQL variables
* @returns {Object} - GQL response data with the `data` wrapper removed
*
* @example
* // fetch the count of organized scenes
* const filter = { organized: true };
* const query = `query findScenes($filter: SceneFilter) { findScenes(filter: $filter) { count } }`;
* const variables = { filter };
* const response = await callGQL({ query, variables });
* // response = { findScenes: { count: 3 } }
*/
```
## getConfiguration
```js
/**
* Get configuration of a plugin from the server via GraphQL
* @param {string} pluginId - The ID of the plugin as it is registered in the server
* @param {*}= fallback - Fallback value if the configuration is not found. Defaults to an empty object
* @returns {Object} - The configuration object of the plugin as it is stored in the server
*
* @example
* // set default config
* const defaultConfig = { enabled: true, theme: 'light' };
* // fetch config from the server
* const config = await getConfiguration('CommunityScriptsUIPlugin', defaultConfig);
* // config = { theme: 'dark' }
* // merge fetched with default config
 * const pluginConfig = {
 *   ...defaultConfig,
 *   ...config
 * };
 * // pluginConfig = { enabled: true, theme: 'dark' }
*/
```
## setConfiguration
```js
/**
* Set configuration of a plugin in the server via GraphQL
* @param {string} pluginId - The ID of the plugin as it is registered in the server
* @param {*} values - The configuration object with the values you want to save in the server
* @returns {Object} - The configuration object of the plugin as it is stored in the server after update
*
* @example
* // fetch config from the server
* const config = await getConfiguration('CommunityScriptsUIPlugin', defaultConfig);
* // config = { theme: 'dark' }
* // update the config based on user input
* // config = { theme: 'light' }
* // save config in the server
* await setConfiguration('CommunityScriptsUIPlugin', config);
*/
```
## waitForElement
```js
/**
* Waits for an element to be available in the DOM and runs the callback function once it is
* @param {string} selector - The CSS selector of the element to wait for
* @param {function} callback - The function to be called once the element is available (with the element as an argument)
* @returns
*
* @example
* // wait for the element with the class 'my-element' to be available
* // and change its color to red
* function myCallback(el) {
* el.style.color = 'red';
* };
* waitForElement('.my-element', myCallback);
*/
```
## pathElementListener
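This section's body is cut off in the diff above; the JSDoc below mirrors the `PathElementListener` implementation in cs-ui-lib.js shown further down (the heading uses lowercase `pathElementListener`, but the exported name is `PathElementListener`):
```js
/**
 * Wait for a specific element to be available on a specific path
 * This combines the `waitForElement` and `PluginApi.Event.addEventListener` functions to only trigger on certain pages
 *
 * @param {string} path - The path to listen for
 * @param {string} element - The CSS selector of the element to wait for
 * @param {function} callback - The function to be called once the element is available (with the element as an argument)
 *
 * @example
 * // wait for the element with the class 'my-element' to be available, but only on the `/scene/#` path
 * function myCallback(el) {
 *   el.style.color = 'red';
 * };
 * PathElementListener('/scene/', '.my-element', myCallback);
 * // this will only trigger the callback function when the user is on the `/scene/` path AND the element is available
 */
```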


@@ -1,141 +0,0 @@
// CommunityScripts UI Library
// cs-ui-lib.js
(function () {
// get base URL for graphQL queries
const baseURL = document.querySelector("base")?.getAttribute("href") ?? "/";
/**
* This is a wrapped GraphQL (GQL) query caller
* @param {...Object} reqData
* @param {Object} reqData.query - GraphQL query
* @param {Object}= reqData.variables - GraphQL variables
* @returns {Object} - GQL response data with the `data` wrapper removed
*
* @example
* // fetch the count of organized scenes
* const filter = { organized: true };
* const query = `query findScenes($filter: SceneFilter) { findScenes(filter: $filter) { count } }`;
* const variables = { filter };
* const response = await callGQL({ query, variables });
* // response = { findScenes: { count: 3 } }
*/
const callGQL = (reqData) =>
fetch(`${baseURL}graphql`, {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify(reqData),
})
.then((res) => res.json())
.then((res) => res.data);
/**
* Get configuration of a plugin from the server via GraphQL
* @param {string} pluginId - The ID of the plugin as it is registered in the server
* @param {*}= fallback - Fallback value if the configuration is not found. Defaults to an empty object
* @returns {Object} - The configuration object of the plugin as it is stored in the server
*
* @example
* // set default config
* const defaultConfig = { enabled: true, theme: 'light' };
* // fetch config from the server
* const config = await getConfiguration('CommunityScriptsUIPlugin', defaultConfig);
* // config = { theme: 'dark' }
* // merge fetched with default config
 * const pluginConfig = {
 *   ...defaultConfig,
 *   ...config
 * };
 * // pluginConfig = { enabled: true, theme: 'dark' }
*/
const getConfiguration = async (pluginId, fallback = {}) => {
const query = `query Configuration { configuration { plugins }}`;
const response = await callGQL({ query });
return response.configuration.plugins?.[pluginId] ?? fallback;
};
/**
* Set configuration of a plugin in the server via GraphQL
* @param {string} pluginId - The ID of the plugin as it is registered in the server
* @param {*} values - The configuration object with the values you want to save in the server
* @returns {Object} - The configuration object of the plugin as it is stored in the server after update
*
* @example
* // fetch config from the server
* const config = await getConfiguration('CommunityScriptsUIPlugin', defaultConfig);
* // config = { theme: 'dark' }
* // update the config based on user input
* // config = { theme: 'light' }
* // save config in the server
* await setConfiguration('CommunityScriptsUIPlugin', config);
*/
const setConfiguration = async (pluginId, values) => {
const query = `mutation ConfigurePlugin($pluginId: ID!, $input: Map!) { configurePlugin(plugin_id: $pluginId, input: $input) }`;
const queryBody = {
query: query,
variables: {
pluginId: pluginId,
input: values,
},
};
const response = await csLib.callGQL({ ...queryBody });
return response.configurePlugin;
};
/**
* Waits for an element to be available in the DOM and runs the callback function once it is
* @param {string} selector - The CSS selector of the element to wait for
* @param {function} callback - The function to be called once the element is available (with the element as an argument)
* @returns
*
* @example
* // wait for the element with the class 'my-element' to be available
* // and change its color to red
* function myCallback(el) {
* el.style.color = 'red';
* };
* waitForElement('.my-element', myCallback);
*/
function waitForElement(selector, callback) {
var el = document.querySelector(selector);
if (el) return callback(el);
setTimeout(waitForElement, 100, selector, callback);
}
/**
* Wait for a specific element to be available on a specific path
* This combines the `waitForElement` and `PluginApi.Event.addEventListener` functions to only trigger on certain pages
*
* @param {string} path - The path to listen for
* @param {string} element - The CSS selector of the element to wait for
* @param {function} callback - The function to be called once the element is available (with the element as an argument)
*
* @example
* // wait for the element with the class 'my-element' to be available, but only on the `/scene/#` path
* function myCallback(el) {
* el.style.color = 'red';
* };
* PathElementListener('/scene/', '.my-element', myCallback);
* // this will only trigger the callback function when the user is on the `/scene/` path AND the element is available
*/
const PathElementListener = (path, element, callback) => {
// startup location
if (window.location.pathname.startsWith(path))
waitForElement(element, callback);
PluginApi.Event.addEventListener("stash:location", (e) => {
if (e.detail.data.location.pathname.startsWith(path))
waitForElement(element, callback);
});
};
// export to window
window.csLib = {
baseURL,
callGQL,
getConfiguration,
setConfiguration,
waitForElement,
PathElementListener,
};
})();
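Taken together, these helpers compose cleanly. Below is a minimal usage sketch, assuming the library is loaded inside Stash's plugin environment; the plugin ID, path, and selector are placeholders, not part of the library:
```js
(async function () {
  const defaults = { highlight: true };
  // fetch saved settings (empty-object fallback), then merge them over the defaults
  const saved = await csLib.getConfiguration("ExamplePlugin", {});
  const config = { ...defaults, ...saved };
  // persist the merged settings back to the server
  await csLib.setConfiguration("ExamplePlugin", config);
  // run only on scene pages, once the target element exists
  csLib.PathElementListener("/scenes/", ".scene-header", (el) => {
    if (config.highlight) el.style.outline = "2px solid orange";
  });
})();
```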


@@ -1,12 +0,0 @@
id: CommunityScriptsUILibrary
name: CommunityScriptsUILibrary
metadata:
description: CommunityScripts UI helper library
version: 1.0.0-512fbb8
date: "2024-11-27 21:50:31"
requires: []
source_repository: https://stashapp.github.io/CommunityScripts/stable/index.yml
files:
- cs-ui-lib.js
- README.md
- CommunityScriptsUILibrary.yml


@@ -1,12 +0,0 @@
[251006 10:33:16] [LN:95] INF: '******************* Starting *******************'
[251006 10:33:17] [LN:1573] INF: "Sending json value {'LocalDupReportExist' : false, 'Path': '/root/.stash/plugins/DupFileManager/report/DuplicateTagScenes.html'}"
[251006 10:33:17] [LN:1984] INF: '\n*********************************\nEXITING ***********************\n*********************************'
[251006 10:33:17] [LN:95] INF: '******************* Starting *******************'
[251006 10:33:17] [LN:1573] INF: "Sending json value {'LocalDupReportExist' : false, 'Path': '/root/.stash/plugins/DupFileManager/report/DuplicateTagScenes.html'}"
[251006 10:33:17] [LN:1984] INF: '\n*********************************\nEXITING ***********************\n*********************************'
[251006 10:35:25] [LN:95] INF: '******************* Starting *******************'
[251006 10:35:25] [LN:1573] INF: "Sending json value {'LocalDupReportExist' : false, 'Path': '/root/.stash/plugins/DupFileManager/report/DuplicateTagScenes.html'}"
[251006 10:35:25] [LN:1984] INF: '\n*********************************\nEXITING ***********************\n*********************************'
[251006 10:35:26] [LN:95] INF: '******************* Starting *******************'
[251006 10:35:26] [LN:1573] INF: "Sending json value {'LocalDupReportExist' : false, 'Path': '/root/.stash/plugins/DupFileManager/report/DuplicateTagScenes.html'}"
[251006 10:35:26] [LN:1984] INF: '\n*********************************\nEXITING ***********************\n*********************************'


@@ -1,179 +0,0 @@
# Created by https://www.toptal.com/developers/gitignore/api/python
# Edit at https://www.toptal.com/developers/gitignore?templates=python
.deepface
voy_db
### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
### Python Patch ###
# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
poetry.toml
# ruff
.ruff_cache/
# LSP config files
pyrightconfig.json
# End of https://www.toptal.com/developers/gitignore/api/python


@@ -1,371 +0,0 @@
import os
import sys
import zipfile
import tempfile
from PythonDepManager import ensure_import
# --- VENV AUTO-CREATION WITH REQUIREMENTS AND AUTO-RESTART ---
venv_dir = os.path.join(os.path.dirname(__file__), "venv")
requirements_path = os.path.join(os.path.dirname(__file__), "requirements.txt")
# --- PYTHON VERSION CHECK ---
if not os.path.isdir(venv_dir) and not (sys.version_info.major == 3 and sys.version_info.minor == 10):
ensure_import("stashapi:stashapp-tools>=0.2.58")
import stashapi.log as log
log.error("Error: Python version must be >= 3.10.X (recommanded 3.10.11) for the first installation of the plugin. Once installed you can change back your python version in stash as this plugin will run within its own venv")
log.error(f"Current version: {sys.version}")
log.error("Go to https://www.python.org/downloads/release/python-31011/")
sys.exit(1)
# --- END PYTHON VERSION CHECK ---
def in_venv():
# Checks if running inside the venv we expect
return (
hasattr(sys, 'real_prefix') or
(hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix)
) and os.path.abspath(sys.prefix) == os.path.abspath(venv_dir)
def install_dependencies():
"""
Install dependencies from requirements.txt if not already installed.
"""
if not os.path.isfile(requirements_path):
print("No requirements.txt found, skipping dependency installation.")
return
import subprocess
pip_exe = os.path.join(venv_dir, "Scripts", "pip.exe") if os.name == "nt" else os.path.join(venv_dir, "bin", "pip")
py_exe = os.path.join(venv_dir, "Scripts", "python.exe") if os.name == "nt" else os.path.join(venv_dir, "bin", "python")
subprocess.check_call([py_exe,"-m","pip", "install", "--upgrade", "pip"])
subprocess.check_call([pip_exe, "install", "-r", requirements_path])
if not os.path.isdir(venv_dir):
ensure_import("stashapi:stashapp-tools>=0.2.58")
import stashapi.log as log
import subprocess
log.info("No venv found. Creating virtual environment...")
subprocess.check_call([sys.executable, "-m", "venv", venv_dir])
log.progress(0.25)
log.info("Virtual environment created at "+ venv_dir)
if os.path.isfile(requirements_path):
log.info("Installing dependencies... This might take a while")
install_dependencies()
else:
log.info("No requirements.txt found, skipping dependency installation.")
# If not running in the venv, restart the script using the venv's Python
if not in_venv():
py_exe = os.path.join(venv_dir, "Scripts", "python.exe") if os.name == "nt" else os.path.join(venv_dir, "bin", "python")
print(f"Restarting script in venv: {py_exe}")
os.execv(py_exe, [py_exe] + sys.argv)
# --- END VENV AUTO-CREATION WITH REQUIREMENTS AND AUTO-RESTART ---
import json
import subprocess
import platform
# Set environment variables
os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" # Suppress TF logs
# Ensure dependencies
try:
from deepface import DeepFace
import numpy as np
import psutil
import stashapi.log as log
from stashapi.stashapp import StashInterface
except:
install_dependencies()
from deepface import DeepFace
import numpy as np
import psutil
import stashapi.log as log
from stashapi.stashapp import StashInterface
VOY_DB_PATH = os.path.join(os.path.dirname(__file__), "voy_db")
os.makedirs(os.path.join(VOY_DB_PATH, "facenet"), exist_ok=True)
os.makedirs(os.path.join(VOY_DB_PATH, "arc"), exist_ok=True)
def main():
"""
Main entry point for the plugin.
"""
global stash
json_input = json.loads(sys.stdin.read())
stash = StashInterface(json_input["server_connection"])
mode_arg = json_input["args"].get("mode")
config = stash.get_configuration()["plugins"]
settings = {"voyCount": 15, "sceneCount": 0, "imgCount": 0}
if "LocalVisage" in config:
settings.update(config["LocalVisage"])
if mode_arg == "spawn_server":
spawn_server(json_input["server_connection"])
elif mode_arg == "stop_server":
kill_stashface_server()
elif mode_arg == "rebuild_model":
rebuild_model(update_only=False, settings=settings)
elif mode_arg == "update_model":
rebuild_model(update_only=True, settings=settings)
def can_read_image(image_path):
"""
Check if an image path can be read, handling both regular files and files inside ZIP archives.
Args:
image_path (str): Path to the image file
Returns:
tuple: (can_read, actual_path) where can_read is bool and actual_path is the path to use
"""
if os.path.exists(image_path):
return True, image_path
# Check if it's inside a ZIP file
if ".zip" in image_path.lower():
try:
parts = image_path.split(".zip")
if len(parts) >= 2:
zip_path = parts[0] + ".zip"
internal_path = parts[1].lstrip(os.sep + "/") # Remove leading separators
if os.path.exists(zip_path):
with zipfile.ZipFile(zip_path, 'r') as zip_file:
# Check if the internal path exists in the ZIP
if internal_path in zip_file.namelist():
# Extract to temporary file and return temp path
with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(internal_path)[1]) as tmp_file:
tmp_file.write(zip_file.read(internal_path))
return True, tmp_file.name
except Exception as e:
log.warning(f"Error reading from ZIP file {image_path}: {e}")
return False, image_path
def cleanup_temp_file(file_path):
"""
Clean up temporary files created for ZIP extraction.
Args:
file_path (str): Path to the temporary file
"""
try:
if file_path.startswith(tempfile.gettempdir()):
os.unlink(file_path)
except Exception as e:
log.warning(f"Error cleaning up temporary file {file_path}: {e}")
def find_performers(settings):
"""
Find performers with images for model building.
"""
query={}
# query performers based on sceneCount and imgCount settings
scene_count_min = settings.get("sceneCount", 0)
img_count_min = settings.get("imgCount", 0)
if scene_count_min>0 or img_count_min>0:
query={
"scene_count": {"modifier": "GREATER_THAN", "value": scene_count_min-1},
"image_count": {"modifier": "GREATER_THAN", "value": img_count_min-1},
}
performers_all = stash.find_performers(f=query, fragment="id name image_path custom_fields")
performers_without_image = stash.find_performers(f={"is_missing": "image"}, fragment="id")
performers_without_image_ids = {p["id"] for p in performers_without_image}
performers_to_process = [p for p in performers_all if p["id"] not in performers_without_image_ids]
performers_to_process = [
p for p in performers_to_process
if (p.get("scene_count", 0) >= scene_count_min and
p.get("image_count", 0) >= img_count_min)
]
return enrich_performers(performers_to_process, settings)
def enrich_performers(performers, settings):
"""
Add extra images to each performer for embedding calculation.
"""
for progress, performer in enumerate(performers):
performer["images"] = []
if performer.get("image_path"):
performer["images"].append(performer["image_path"])
extra_images = stash.find_images(
filter={
"direction": "ASC",
"page": 1,
"per_page": settings.get("voyCount", 15) - 1,
"q": "",
"sort": "random_11365347"
},
f={
"performer_count": {"modifier": "EQUALS", "value": 1},
"performers": {"modifier": "INCLUDES_ALL", "value": [performer["id"]]},
"path": {
"modifier": "NOT_MATCHES_REGEX",
"value": r".*\.(mp4|webm|avi|mov|mkv|flv|wmv|gif)$|.*[^\x00-\x7F].*"
}
}
)
for image in extra_images:
if image.get("visual_files") and len(image["visual_files"]) > 0:
image_path = image["visual_files"][0]["path"]
can_read, actual_path = can_read_image(image_path)
if can_read:
performer["images"].append(actual_path)
else:
log.warning(f"Image path does not exist and cannot be read: {image_path}")
else:
log.warning(f"No visual files found for image ID: {image['id']}")
log.progress((progress + 1) / len(performers))
return performers
def rebuild_model(update_only, settings):
"""
Build or update the face embedding model for all performers.
"""
log.info("Updating model..." if update_only else "Rebuilding model...")
performers = find_performers(settings)
if not performers:
log.info("No performers found for model building.")
return
log.info("Database scraped, starting to rebuild model...")
for progress, performer in enumerate(performers):
embeddings_facenet = []
embeddings_arc = []
custom_fields = performer.get("custom_fields", {})
images_used = custom_fields.get("number_of_images_used_for_voy", 0)
if update_only and images_used >= settings["voyCount"]:
continue
if update_only and len(performer["images"]) <= images_used:
continue
for uri in performer["images"]:
try:
result_facenet = DeepFace.represent(
img_path=uri,
model_name="Facenet512",
detector_backend='yolov8',
normalization='Facenet2018',
align=True,
enforce_detection=False
)
embeddings_facenet.append(result_facenet[0]['embedding'])
result_arc = DeepFace.represent(
img_path=uri,
model_name="ArcFace",
detector_backend='yolov8',
enforce_detection=False,
align=True
)
embeddings_arc.append(result_arc[0]['embedding'])
except Exception as e:
log.warning(f"[WARN] Skipping {uri}: {e}")
finally:
# Clean up temporary files created for ZIP extraction
cleanup_temp_file(uri)
if embeddings_facenet and embeddings_arc:
avg_embedding_facenet = np.mean(embeddings_facenet, axis=0).astype(np.float32)
facenet_path = os.path.join(VOY_DB_PATH, "facenet", f"{performer['id']}-{performer['name']}.voy")
np.save(facenet_path, avg_embedding_facenet)
avg_embedding_arc = np.mean(embeddings_arc, axis=0).astype(np.float32)
arc_path = os.path.join(VOY_DB_PATH, "arc", f"{performer['id']}-{performer['name']}.voy")
np.save(arc_path, avg_embedding_arc)
embeddings_count = max(len(embeddings_facenet), len(embeddings_arc))
stash.update_performer({
"id": performer["id"],
"custom_fields": {
"partial": {
"number_of_images_used_for_voy": embeddings_count,
}
}
})
log.info(f"[INFO] Saved VOY for {performer['name']} with {embeddings_count} images.")
else:
log.warning(f"[WARN] No valid embeddings for {performer['name']}.")
log.progress((progress + 1) / len(performers))
log.info("Rebuilding model finished.")
if server_running():
kill_stashface_server()
# Optionally, reload server with new connection info if needed
def server_running():
"""
Check if the stashface server is running.
"""
try:
for proc in psutil.process_iter(['pid', 'name', 'cmdline']):
name = proc.info.get('name', '').lower()
cmdline_raw = proc.info.get('cmdline')
if not cmdline_raw:
continue
cmdline = [str(arg).lower() for arg in cmdline_raw]
if 'python' in name and any('stashface' in arg and 'app.py' in arg for arg in cmdline):
log.debug("Stashface server is already running.")
return True
except psutil.NoSuchProcess:
return False
return False
def kill_stashface_server():
"""
Kill any running stashface server processes.
"""
killed = False
for proc in psutil.process_iter(['pid', 'name', 'cmdline']):
try:
cmdline = proc.info['cmdline']
if cmdline and any('stashface' in arg and 'app.py' in arg for arg in cmdline):
log.debug(f"Killing process {proc.pid}: {' '.join(cmdline)}")
proc.kill()
killed = True
except (psutil.NoSuchProcess, psutil.AccessDenied):
continue
if killed:
log.info("Stashface server killed.")
def spawn_server(server_connection=None):
"""
Spawn the stashface server as a subprocess.
"""
if server_running():
log.info("Stashface server is already running.")
return
plugin_dir = os.path.dirname(__file__)
py_exe = os.path.join(venv_dir, "Scripts", "python.exe") if os.name == "nt" else os.path.join(venv_dir, "bin", "python")
cmd = [
py_exe,
os.path.abspath(os.path.join(plugin_dir, "stashface", "app.py")),
]
log.info("Spawning server")
env = os.environ.copy()
if server_connection is not None:
env["SERVER_CONNECTION"] = json.dumps(server_connection)
if platform.system() == "Windows":
subprocess.Popen(
cmd,
creationflags=subprocess.CREATE_NEW_CONSOLE,
close_fds=True,
cwd=plugin_dir,
env=env
)
else:
subprocess.Popen(
cmd,
start_new_session=True,
close_fds=True,
cwd=plugin_dir,
env=env
)
log.info("Server spawned successfully, you can now use the plugin.")
if __name__ == '__main__':
main()


@@ -1,57 +0,0 @@
name: Local Visage
description: Local Performer Recognition plugin using DeepFace
# requires: PythonDepManager
# requires: stashUserscriptLibrary7dJx1qP
version: 1.0.1
exec:
- python
- "{pluginDir}/LocalVisage.py"
interface: raw
ui:
requires:
- stashUserscriptLibrary7dJx1qP
javascript:
- frontend.js
- https://cdn.jsdelivr.net/npm/@gradio/client@1.15.3/dist/index.js
css:
- frontend.css
csp:
connect-src:
- http://localhost:7860
- http://192.168.1.198:7860
- http://your-server-ip-address:7860
script-src:
- https://cdn.jsdelivr.net/npm/html2canvas@1.4.1/dist/html2canvas.min.js
- https://cdn.jsdelivr.net/npm/@gradio/client@1.15.3/dist/index.js
tasks:
- name: Rebuild Face Recognition Model
description: Rebuild the face recognition model entirely
defaultArgs:
mode: rebuild_model
- name: Update Face Recognition Model
description: Update the performer face model with new images if their model was built on fewer than "Target image count per voy" images
defaultArgs:
mode: update_model
- name: Start server
description: Start the face recognition server (if not started) to allow the plugin to work
defaultArgs:
mode: spawn_server
- name: Stop server
description: Stop the face recognition server
defaultArgs:
mode: stop_server
settings:
voyCount:
displayName: Target image count per voy (default is 15)
description: Number of images to use to create the face recognition model (per performer)
type: NUMBER
imgCount:
displayName: Minimum number of images for performer to be added to model
description: Minimum number of images a performer must have to be included in recognition (EXCLUDING THE PERFORMER THUMBNAIL). Set to 0 for best results.
type: NUMBER
sceneCount:
displayName: Minimum number of scenes for performer to be added to model
description: Minimum number of scenes a performer must have to be included in recognition
type: NUMBER


@@ -1,136 +0,0 @@
button.svelte-localhjf {
background-color: var(--nav-color);
border: 0px;
}
.scanner.svelte-localhjf {
animation: svelte-localhjf-pulse 2s infinite;
}
@keyframes svelte-localhjf-pulse {
0% {
transform: scale(0.95);
box-shadow: 0 0 0 0 var(--light);
}
70% {
transform: scale(1.1);
box-shadow: 0 0 0 10px var(--info);
}
100% {
transform: scale(0.95);
box-shadow: 0 0 0 0 var(--primary);
}
}
svg.svelte-localhjf {
fill: #ffffff;
}
button.svelte-localhjf {
background-color: var(--nav-color);
border: 0px;
}
.scanner.svelte-localhjf {
animation: svelte-localhjf-pulse 2s infinite;
}
@keyframes svelte-localhjf-pulse {
0% {
transform: scale(0.95);
box-shadow: 0 0 0 0 var(--light);
}
70% {
transform: scale(1.1);
box-shadow: 0 0 0 10px var(--info);
}
100% {
transform: scale(0.95);
box-shadow: 0 0 0 0 var(--primary);
}
}
svg.svelte-localhjf {
fill: #ffffff;
}
.carousel.svelte-localhja {
display: flex;
overflow-x: auto;
overflow-y: auto;
white-space: nowrap;
overscroll-behavior-x: contain;
overscroll-behavior-y: contain;
scroll-snap-type: x mandatory;
gap: 1rem;
}
.modal-header.svelte-localhja {
font-size: 2.4rem;
border-bottom: 0px;
padding: 10px 10px 0px 10px;
}
.modal-footer.svelte-localhja {
border-top: 0px;
}
.svelte-localhja::-webkit-scrollbar {
width: 30px;
}
.svelte-localhja::-webkit-scrollbar-thumb {
background: var(--orange);
border-radius: 20px;
}
.card.svelte-localhja {
max-width: 78%;
}
.performer-card.svelte-localhja {
cursor: pointer;
}
.performer-card-image .svelte-localhja {
min-width: none !important;
aspect-ratio: 4/5;
}
.assigned.svelte-localhja {
border: 5px solid var(--green);
animation: border 1s ease-in-out;
}
.face-tab.svelte-localhja {
width: 50px;
height: 50px;
object-fit: cover;
}
.selected.svelte-localhjb {
border: 2px solid #007bff;
}
.face-tabs.svelte-localhjb {
position: absolute;
flex: 0 0 450px;
max-width: 450px;
min-width: 450px;
height: 100%;
overflow: auto;
order: -1;
background-color: var(--body-color);
}
.face-item.svelte-localhjb {
width: 160px;
height: 90px;
border-radius: 5px 5px 0px 0px;
position: relative;
cursor: pointer;
}
.svelte-tabs__tab.svelte-localhjc {
border: none;
border-bottom: 2px solid transparent;
color: #000000;
cursor: pointer;
list-style: none;
display: inline-block;
padding: 0.5em 0.75em;
}
.svelte-tabs__tab.svelte-localhjc:focus {
outline: thin dotted;
}
.svelte-tabs__selected.svelte-localhjc {
border-bottom: 2px solid #4f81e5;
color: #4f81e5;
}
.svelte-tabs__tab-panel.svelte-lcocalhjd {
margin-top: 0.5em;
}
.svelte-tabs__tab-list.svelte-localhje {
border-bottom: 1px solid #cccccc;
margin: 0;
padding: 0;
}

File diff suppressed because one or more lines are too long


@@ -1,25 +0,0 @@
id: LocalVisage
name: Local Visage
metadata:
description: Local Performer Recognition plugin using DeepFace
version: 1.0.1-05b0e72
date: "2025-07-16 04:26:21"
requires: []
source_repository: https://stashapp.github.io/CommunityScripts/stable/index.yml
files:
- stashface/utils/__init__.py
- stashface/utils/vtt_parser.py
- stashface/app.py
- stashface/models/image_processor.py
- stashface/models/data_manager.py
- stashface/models/__init__.py
- stashface/models/face_recognition.py
- stashface/web/__init__.py
- stashface/web/interface.py
- .gitignore
- frontend.js
- LocalVisage.yml
- requirements.txt
- LocalVisage.py
- readme.md
- frontend.css


@@ -1,69 +0,0 @@
# Local Performer Recognition
https://discourse.stashapp.cc/t/local-visage/2478
A plugin for recognizing performers from their images using [DeepFace](https://github.com/serengil/deepface). This plugin integrates seamlessly with Stash and enables automatic facial recognition by building or updating a local model trained from your existing image collection.
## 🔍 Features
- **Rebuild Face Recognition Model**
Completely rebuild the local facial recognition model using available images per performer.
- **Update Face Recognition Model**
Incrementally updates the model if performers have fewer images than the configured target count.
- **Automatic Server Control**
Easily start or stop the recognition server as needed; it starts automatically when an image is queried.
- **Identify**
Click on the new icon next to an image to trigger performer identification.
## 📦 Requirements
- Python 3.10.11 (temporarily, see instructions below)
- `PythonDepManager`
- `stashUserscriptLibrary7djx1qp` (add the repo https://7djx1qp.github.io/stash-plugins/)
## ⚙️ Tasks
| Task | Description |
| ---------------------------------- | --------------------------------------------------------------------- |
| **Rebuild Face Recognition Model** | Fully rebuild the DeepFace model for all performers. |
| **Update Face Recognition Model** | Add more images for performers with fewer than the target image count. |
| **Start Server** | Start the local DeepFace server if it's not already running. |
| **Stop Server** | Gracefully stop the running recognition server. |
## 🔧 Settings
| Setting | Description |
| ------------------------------ | ------------------------------------------------------------------------------- |
| **Target image count per voy** | Number of images to use per performer when training the model. Default is `15`. |
## 🚀 Installation & Setup
### 1. Set Python Path to 3.10.11
To ensure compatibility with DeepFace and the plugin's dependency resolution process:
- Temporarily set the Python path in your system/environment to **Python 3.10.11**.
### 2. Rebuild the Model
Run the **"Rebuild Face Recognition Model"** task. This will:
- Set up a virtual environment
- Install all necessary Python dependencies (DeepFace, etc.)
- Build the recognition model
### 3. Restore Python Path (Optional)
Once setup is complete, you can revert your Python path to its original version. The plugin will continue working with the generated virtual environment.
## 🖼 Usage
1. Once the model is built, navigate to an image in your Stash UI.
2. Click the **Performer Recognition** icon overlaying the image.
3. The plugin will:
- Automatically start the recognition server if it's not already running
- Query the server to identify the performer
- Display the matched performer from the trained database
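For the query step above, the plugin's UI already loads `@gradio/client` (see the csp block in LocalVisage.yml), so a recognition request presumably resembles the sketch below. The endpoint name, payload shape, and image URL are assumptions for illustration, not the plugin's actual API:
```js
import { Client } from "@gradio/client";

// connect to the local recognition server (Gradio on port 7860, per LocalVisage.yml)
const client = await Client.connect("http://localhost:7860");

// fetch the image to identify; this URL is a placeholder, not a real Stash route
const imageBlob = await (await fetch("http://localhost:9999/image/123")).blob();

// "/predict" and the single-blob payload are assumed endpoint details
const result = await client.predict("/predict", [imageBlob]);
console.log(result.data); // candidate performer matches
```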


@@ -1,129 +0,0 @@
# Don't install this manually. The plugin will create a venv and install the requirements automatically.
#
#nvidia-cublas-cu12==12.4.5.8
#nvidia-cuda-cupti-cu12==12.4.127
#nvidia-cuda-nvrtc-cu12==12.4.127
#nvidia-cuda-runtime-cu12==12.4.127
#nvidia-cudnn-cu12==9.1.0.70
#nvidia-cufft-cu12==11.2.1.3
#nvidia-curand-cu12==10.3.5.147
#nvidia-cusolver-cu12==11.6.1.9
#nvidia-cusparse-cu12==12.3.1.170
#nvidia-cusparselt-cu12==0.6.2
#nvidia-nccl-cu12==2.21.5
#nvidia-nvjitlink-cu12==12.4.127
#nvidia-nvtx-cu12==12.4.127
stashapp-tools>=0.2.58
absl-py==2.2.2
aiofiles==24.1.0
annotated-types==0.7.0
anyio==4.9.0
astunparse==1.6.3
beautifulsoup4==4.13.4
blinker==1.9.0
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
contourpy==1.3.2
cycler==0.12.1
deepface @ git+https://github.com/serengil/deepface.git@cc484b54be5188eb47faf132995af16a871d70b9
fastapi==0.115.12
ffmpy==0.5.0
filelock==3.18.0
fire==0.7.0
flask==3.1.0
flask-cors==5.0.1
flatbuffers==25.2.10
fonttools==4.57.0
fsspec==2025.3.2
gast==0.6.0
gdown==5.2.0
google-pasta==0.2.0
gradio==5.25.2
gradio-client==1.8.0
groovy==0.1.2
grpcio==1.71.0
gunicorn==23.0.0
h11==0.14.0
h5py==3.13.0
httpcore==1.0.8
httpx==0.28.1
huggingface-hub==0.30.2
idna==3.10
itsdangerous==2.2.0
jinja2==3.1.6
joblib==1.4.2
keras==3.9.2
kiwisolver==1.4.8
libclang==18.1.1
lz4==4.4.4
markdown==3.8
markdown-it-py==3.0.0
markupsafe==3.0.2
matplotlib==3.10.1
mdurl==0.1.2
ml-dtypes==0.5.1
mpmath==1.3.0
mtcnn==1.0.0
namex==0.0.8
networkx==3.4.2
numpy==2.1.3
opencv-python==4.11.0.86
opt-einsum==3.4.0
optree==0.15.0
orjson==3.10.16
packaging==25.0
pandas==2.2.3
pillow==11.2.1
protobuf==5.29.4
psutil==7.0.0
py-cpuinfo==9.0.0
pycryptodomex==3.22.0
pydantic==2.11.3
pydantic-core==2.33.1
pydub==0.25.1
pygments==2.19.1
pyparsing==3.2.3
pysocks==1.7.1
python-dateutil==2.9.0.post0
python-multipart==0.0.20
pytz==2025.2
pyyaml==6.0.2
pyzipper==0.3.6
requests==2.32.3
retina-face==0.0.17
rich==14.0.0
ruff==0.11.6
safehttpx==0.1.6
scipy==1.15.2
seaborn==0.13.2
semantic-version==2.10.0
setuptools==78.1.0
shellingham==1.5.4
six==1.17.0
sniffio==1.3.1
soupsieve==2.6
starlette==0.46.2
sympy==1.13.1
tensorboard==2.19.0
tensorboard-data-server==0.7.2
tensorflow==2.19.0
termcolor==3.0.1
tf-keras==2.19.0
tomlkit==0.13.2
torch==2.6.0
torchvision==0.21.0
tqdm==4.67.1
typer==0.15.2
typing-extensions==4.13.2
typing-inspection==0.4.0
tzdata==2025.2
ultralytics==8.3.69
ultralytics-thop==2.0.14
urllib3==2.4.0
uvicorn==0.34.2
#voyager==2.1.0
websockets==15.0.1
werkzeug==3.1.3
wheel==0.45.1
wrapt==1.17.2


@@ -1,38 +0,0 @@
import os
import sys
# Set DeepFace home directory
os.environ["DEEPFACE_HOME"] = "."
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" # Suppress TF logs
# Add the plugins directory to sys.path
plugins_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
if plugins_dir not in sys.path:
sys.path.insert(0, plugins_dir)
from stashapi.stashapp import StashInterface
try:
from models.data_manager import DataManager
from web.interface import WebInterface
except ImportError as e:
    print(f"Error importing modules: {e}")
    input("Ensure you have installed the required dependencies. Press Enter to exit.")
    sys.exit(1)  # main() cannot run without these modules
def main():
"""Main entry point for the application"""
# Initialize data manager
data_manager = DataManager(
voy_root_folder=os.path.abspath(os.path.join(os.path.dirname(__file__),"../voy_db")),
)
# Initialize and launch web interface
web_interface = WebInterface(data_manager, default_threshold=0.5)
web_interface.launch(server_name="0.0.0.0", server_port=7860, share=False)
if __name__ == "__main__":
main()


@@ -1,116 +0,0 @@
import os
import sys
import json
from urllib.parse import urlparse
import numpy as np
from typing import Dict, Any, Optional, List
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../py_dependencies/numpy_1.26.4")))
server_connection = json.loads(os.environ.get("SERVER_CONNECTION"))  # injected by Stash when it launches the plugin
from stashapi.stashapp import StashInterface
class DataManager:
def __init__(self, voy_root_folder):
"""
Initialize the data manager using folders of .voy files for each model.
Parameters:
voy_root_folder: Path to the root folder containing 'facenet' and 'arc' subfolders.
"""
self.voy_root_folder = voy_root_folder
self.embeddings = {
"facenet": {}, # Dict[str, Dict[str, Any]]
"arc": {}
}
self._load_voy_files()
self.stash = StashInterface(server_connection)
def _load_voy_files(self):
"""Load all .voy files for each model into memory."""
for model in ["facenet", "arc"]:
folder = os.path.join(self.voy_root_folder, model)
self.embeddings[model] = {}
if not os.path.isdir(folder):
continue
for fname in os.listdir(folder):
if fname.endswith(".voy.npy") or fname.endswith(".voy"):
try:
# Remove .voy or .voy.npy
if fname.endswith(".voy.npy"):
id_name = fname[:-8]
else:
id_name = fname[:-4]
stash_id, name = id_name.split("-", 1)
path = os.path.join(folder, fname)
embedding = np.load(path)
self.embeddings[model][stash_id] = {
"name": name,
"embedding": embedding
}
except Exception as e:
print(f"Error loading {fname} for {model}: {e}")
def get_all_ids(self, model: str = "facenet") -> List[str]:
"""Return all performer IDs for a given model."""
return list(self.embeddings.get(model, {}).keys())
def get_performer_info(self, stash_id: str, confidence: float) -> Optional[Dict[str, Any]]:
"""
Get performer information from the loaded embeddings.
Parameters:
stash_id: Stash ID of the performer
confidence: Confidence score (0-1)
Returns:
Dictionary with performer information or None if not found
"""
performer = self.stash.find_performer(stash_id)
if not performer:
# Fallback to embedding name if performer not found
for model in self.embeddings:
if stash_id in self.embeddings[model]:
name = self.embeddings[model][stash_id].get("name", "Unknown")
break
else:
name = "Unknown"
return {
'id': stash_id,
"name": name,
"image": None,
"confidence": int(confidence * 100),
}
return {
'id': stash_id,
"name": performer['name'],
"image": urlparse(performer['image_path']).path if performer.get('image_path') else None,
"confidence": int(confidence * 100),
'country': performer.get('country'),
'distance': int(confidence * 100),
'performer_url': f"/performers/{stash_id}"
}
def query_index(self, model: str, embedding: np.ndarray, limit: int = 5):
"""
Query the loaded embeddings for the closest matches using cosine similarity for a given model.
Parameters:
model: 'facenet' or 'arc'
embedding: The embedding to compare
limit: Number of top matches to return
Returns:
List of (stash_id, distance) tuples, sorted by distance ascending
"""
results = []
for stash_id, data in self.embeddings.get(model, {}).items():
db_embedding = data["embedding"]
sim = np.dot(embedding, db_embedding) / (np.linalg.norm(embedding) * np.linalg.norm(db_embedding))
distance = 1 - sim
results.append((stash_id, distance))
results.sort(key=lambda x: x[1])
return results[:limit]
def query_facenet_index(self, embedding: np.ndarray, limit: int = 5):
"""Query the Facenet index."""
return self.query_index("facenet", embedding, limit)
def query_arc_index(self, embedding: np.ndarray, limit: int = 5):
"""Query the ArcFace index."""
return self.query_index("arc", embedding, limit)


@@ -1,90 +0,0 @@
import os
import numpy as np
from typing import Dict, List, Tuple
from deepface import DeepFace
class EnsembleFaceRecognition:
def __init__(self, model_weights: Dict[str, float] = None):
"""
Initialize ensemble face recognition system.
Parameters:
model_weights: Dictionary mapping model names to their weights.
If None, all models are weighted equally.
"""
self.model_weights = model_weights or {}
self.boost_factor = 1.8
def normalize_distances(self, distances: np.ndarray) -> np.ndarray:
"""Normalize distances to [0,1] range within each model's predictions."""
min_dist = np.min(distances)
max_dist = np.max(distances)
if max_dist == min_dist:
return np.zeros_like(distances)
return (distances - min_dist) / (max_dist - min_dist)
def compute_model_confidence(self, distances: np.ndarray, temperature: float = 0.1) -> np.ndarray:
"""Convert distances to confidence scores for a single model."""
normalized_distances = self.normalize_distances(distances)
exp_distances = np.exp(-normalized_distances / temperature)
return exp_distances / np.sum(exp_distances)
def get_face_embeddings(self, image_path: str) -> Dict[str, np.ndarray]:
"""Get face embeddings for each model from an image path."""
return {
'facenet': DeepFace.represent(img_path=image_path, detector_backend='skip', model_name='Facenet512', normalization='Facenet2018', align=True)[0]['embedding'],
'arc': DeepFace.represent(img_path=image_path, detector_backend='skip', model_name='ArcFace', align=True)[0]['embedding']
}
def ensemble_prediction(
self,
model_predictions: Dict[str, Tuple[List[str], List[float]]],
temperature: float = 0.1,
min_agreement: float = 0.5
) -> List[Tuple[str, float]]:
"""
Combine predictions from multiple models.
Parameters:
model_predictions: Dictionary mapping model names to their (names, distances) predictions.
temperature: Temperature parameter for softmax scaling.
min_agreement: Minimum agreement threshold between models.
Returns:
final_predictions: List of (name, confidence) tuples.
"""
vote_dict = {}
confidence_dict = {}
for model_name, (names, distances) in model_predictions.items():
model_weight = self.model_weights.get(model_name, 1.0)
confidences = self.compute_model_confidence(np.array(distances), temperature)
top_name = names[0]
top_confidence = confidences[0]
vote_dict[top_name] = vote_dict.get(top_name, 0) + model_weight
confidence_dict[top_name] = confidence_dict.get(top_name, [])
confidence_dict[top_name].append(top_confidence)
total_weight = sum(self.model_weights.values()) if self.model_weights else len(model_predictions)
final_results = []
for name, votes in vote_dict.items():
normalized_votes = votes / total_weight
if normalized_votes >= min_agreement:
avg_confidence = np.mean(confidence_dict[name])
final_score = normalized_votes * avg_confidence * self.boost_factor
final_score = min(final_score, 1.0)
final_results.append((name, final_score))
final_results.sort(key=lambda x: x[1], reverse=True)
return final_results
def extract_faces(image_path):
"""Extract faces from an image using DeepFace (YoloV8 backend)."""
return DeepFace.extract_faces(img_path=image_path, detector_backend="yolov8")
def extract_faces_mediapipe(image_path, enforce_detection=False, align=False):
"""Extract faces from an image using MediaPipe backend."""
return DeepFace.extract_faces(img_path=image_path, detector_backend="mediapipe",
enforce_detection=enforce_detection,
align=align)
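
A worked example of `ensemble_prediction` (import path assumed from this plugin's layout): with no explicit weights each model votes 1.0, so two models that both rank performer "123" first give it a normalized vote of 1.0, and the boosted score is capped at 1.0:

```python
from models.face_recognition import EnsembleFaceRecognition  # path as laid out in this plugin

erf = EnsembleFaceRecognition()  # no model_weights, so every model counts as 1.0
preds = {
    "facenet": (["123", "456"], [0.20, 0.55]),  # (ids, distances), best match first
    "arc":     (["123", "789"], [0.25, 0.60]),
}
print(erf.ensemble_prediction(preds))  # [('123', 1.0)]: full agreement, score capped at 1.0
```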


@@ -1,159 +0,0 @@
import io
import base64
import numpy as np
from uuid import uuid4
from PIL import Image as PILImage
from typing import List, Dict, Any, Tuple
import logging
from models.face_recognition import EnsembleFaceRecognition, extract_faces, extract_faces_mediapipe
from models.data_manager import DataManager
from utils.vtt_parser import parse_vtt_offsets
def get_face_predictions_ensemble(face, data_manager, results=3, max_distance=0.8):
"""
Get predictions for a single face using both Facenet and ArcFace, then ensemble.
Parameters:
face: Face image array
data_manager: DataManager instance
results: Number of results to return
Returns:
List of (stash_id, confidence) tuples
"""
# Get embeddings for original and flipped images, then average
from deepface import DeepFace
embedding_facenet_orig = DeepFace.represent(img_path=face, detector_backend='skip', model_name='Facenet512', normalization='Facenet2018', align=True)[0]['embedding']
embedding_facenet_flip = DeepFace.represent(img_path=np.fliplr(face), detector_backend='skip', model_name='Facenet512', normalization='Facenet2018', align=True)[0]['embedding']
embedding_facenet = np.mean([embedding_facenet_orig, embedding_facenet_flip], axis=0)
embedding_arc_orig = DeepFace.represent(img_path=face, detector_backend='skip', model_name='ArcFace', align=True)[0]['embedding']
embedding_arc_flip = DeepFace.represent(img_path=np.fliplr(face), detector_backend='skip', model_name='ArcFace', align=True)[0]['embedding']
embedding_arc = np.mean([embedding_arc_orig, embedding_arc_flip], axis=0)
# Query DataManager for closest matches for both models
preds_facenet = data_manager.query_facenet_index(embedding_facenet, limit=results)
preds_arc = data_manager.query_arc_index(embedding_arc, limit=results)
# Filter by distance threshold
filtered_facenet = [(stash_id, dist) for stash_id, dist in preds_facenet if dist < max_distance]
filtered_arc = [(stash_id, dist) for stash_id, dist in preds_arc if dist < max_distance]
# Prepare for ensemble
model_predictions = {}
if filtered_facenet:
names_f, dists_f = zip(*filtered_facenet)
model_predictions['facenet'] = (list(names_f), list(dists_f))
if filtered_arc:
names_a, dists_a = zip(*filtered_arc)
model_predictions['arc'] = (list(names_a), list(dists_a))
if not model_predictions:
return []
ensemble = EnsembleFaceRecognition()
return ensemble.ensemble_prediction(model_predictions)
def image_search_performer(image, data_manager, threshold=0.5, results=3):
"""
Search for a performer in an image using both Facenet and ArcFace.
Parameters:
image: PIL Image object
data_manager: DataManager instance
threshold: Confidence threshold
results: Number of results to return
Returns:
List of performer information dictionaries
"""
image_array = np.array(image)
try:
faces = extract_faces(image_array)
except ValueError:
raise ValueError("No faces found")
predictions = get_face_predictions_ensemble(faces[0]['face'], data_manager, results)
logging.info(f"Predictions: {predictions}")
response = []
for stash_id, confidence in predictions:
if confidence < threshold:
continue
performer_info = data_manager.get_performer_info(stash_id, confidence)
if performer_info:
response.append(performer_info)
    logging.debug(f"Response: {response}")
return response
def image_search_performers(image, data_manager, threshold=0.5, results=3):
"""
Search for multiple performers in an image using both Facenet and ArcFace.
Parameters:
image: PIL Image object
data_manager: DataManager instance
threshold: Confidence threshold
results: Number of results to return
Returns:
List of dictionaries with face image and performer information
"""
image_array = np.array(image)
try:
faces = extract_faces(image_array)
except ValueError:
raise ValueError("No faces found")
response = []
for face in faces:
predictions = get_face_predictions_ensemble(face['face'], data_manager, results)
# Crop and encode face image
area = face['facial_area']
cimage = image.crop((area['x'], area['y'], area['x'] + area['w'], area['y'] + area['h']))
buf = io.BytesIO()
cimage.save(buf, format='JPEG')
im_b64 = base64.b64encode(buf.getvalue()).decode('ascii')
# Get performer information
performers = []
for stash_id, confidence in predictions:
if confidence < threshold:
continue
performer_info = data_manager.get_performer_info(stash_id, confidence)
if performer_info:
performers.append(performer_info)
response.append({
'image': im_b64,
'confidence': face['confidence'],
'performers': performers
})
return response
def find_faces_in_sprite(image, vtt_data):
"""
Find faces in a sprite image using VTT data
Parameters:
        image: Sprite sheet as a numpy array (converted to a PIL image internally)
vtt_data: Base64 encoded VTT data
Returns:
List of dictionaries with face information
"""
vtt = base64.b64decode(vtt_data.replace("data:text/vtt;base64,", ""))
sprite = PILImage.fromarray(image)
results = []
    for i, (left, top, width, height, time_seconds) in enumerate(parse_vtt_offsets(vtt)):
        cut_frame = sprite.crop((left, top, left + width, top + height))
faces = extract_faces_mediapipe(np.asarray(cut_frame), enforce_detection=False, align=False)
faces = [face for face in faces if face['confidence'] > 0.6]
if faces:
size = faces[0]['facial_area']['w'] * faces[0]['facial_area']['h']
data = {'id': str(uuid4()), "offset": (left, top, right, bottom), "frame": i, "time": time_seconds, 'size': size}
results.append(data)
return results
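
For orientation, this is the shape of the `image_search_performers` response; values are illustrative, and the performer fields mirror `DataManager.get_performer_info`:

```python
# One entry per detected face:
# [
#   {
#     "image": "<base64-encoded JPEG crop of the face>",
#     "confidence": 0.99,  # face-detector confidence for this crop
#     "performers": [
#       {"id": "42", "name": "...", "image": "/performer/42/image",
#        "confidence": 87, "country": "US", "distance": 87,
#        "performer_url": "/performers/42"},
#     ],
#   },
# ]
```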


@@ -1,44 +0,0 @@
from typing import List, Tuple, Generator
def parse_vtt_offsets(vtt_content: bytes) -> Generator[Tuple[int, int, int, int, float], None, None]:
"""
Parse VTT file content and extract offsets and timestamps.
Parameters:
vtt_content: Raw VTT file content as bytes
Returns:
        Generator yielding tuples of (left, top, width, height, time_seconds)
"""
time_seconds = 0
    left = top = width = height = None
for line in vtt_content.decode("utf-8").split("\n"):
line = line.strip()
if "-->" in line:
# grab the start time
# 00:00:00.000 --> 00:00:41.000
start = line.split("-->")[0].strip().split(":")
# convert to seconds
time_seconds = (
int(start[0]) * 3600
+ int(start[1]) * 60
+ float(start[2])
)
            left = top = width = height = None
elif "xywh=" in line:
left, top, right, bottom = line.split("xywh=")[-1].split(",")
left, top, right, bottom = (
int(left),
int(top),
int(right),
int(bottom),
)
else:
continue
        if left is None:  # x == 0 is a valid offset, so test for None explicitly
            continue
        yield left, top, width, height, time_seconds
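
A minimal sketch exercising the parser with an inline cue (note that `xywh` carries x, y, width, height):

```python
from utils.vtt_parser import parse_vtt_offsets  # module path as used in image_processor.py

vtt = b"""WEBVTT

00:00:05.000 --> 00:00:10.000
sprite.jpg#xywh=160,0,160,90
"""
print(list(parse_vtt_offsets(vtt)))  # [(160, 0, 160, 90, 5.0)]
```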


@@ -1,174 +0,0 @@
import gradio as gr
from typing import Dict, Any
from models.data_manager import DataManager
from models.image_processor import (
image_search_performer,
image_search_performers,
find_faces_in_sprite
)
class WebInterface:
def __init__(self, data_manager: DataManager, default_threshold: float = 0.5):
"""
Initialize the web interface.
Parameters:
data_manager: DataManager instance
default_threshold: Default confidence threshold
"""
self.data_manager = data_manager
self.default_threshold = default_threshold
def image_search(self, img, threshold, results):
"""Wrapper for the image search function"""
return image_search_performer(img, self.data_manager, threshold, results)
def multiple_image_search(self, img, threshold, results):
"""Wrapper for the multiple image search function"""
return image_search_performers(img, self.data_manager, threshold, results)
def vector_search(self, vector_json, threshold, results):
"""Wrapper for the vector search function (deprecated)"""
return {'status': 'not implemented'}
def _create_image_search_interface(self):
"""Create the single face search interface"""
with gr.Blocks() as interface:
gr.Markdown("# Who is in the photo?")
gr.Markdown("Upload an image of a person and we'll tell you who it is.")
with gr.Row():
with gr.Column():
img_input = gr.Image()
threshold = gr.Slider(
label="threshold",
minimum=0.0,
maximum=1.0,
value=self.default_threshold
)
results_count = gr.Slider(
label="results",
minimum=0,
maximum=50,
value=3,
step=1
)
search_btn = gr.Button("Search")
with gr.Column():
output = gr.JSON(label="Results")
search_btn.click(
fn=self.image_search,
inputs=[img_input, threshold, results_count],
outputs=output
)
return interface
def _create_multiple_image_search_interface(self):
"""Create the multiple face search interface"""
with gr.Blocks() as interface:
gr.Markdown("# Who is in the photo?")
gr.Markdown("Upload an image of a person(s) and we'll tell you who it is.")
with gr.Row():
with gr.Column():
img_input = gr.Image(type="pil")
threshold = gr.Slider(
label="threshold",
minimum=0.0,
maximum=1.0,
value=self.default_threshold
)
results_count = gr.Slider(
label="results",
minimum=0,
maximum=50,
value=3,
step=1
)
search_btn = gr.Button("Search")
with gr.Column():
output = gr.JSON(label="Results")
search_btn.click(
fn=self.multiple_image_search,
inputs=[img_input, threshold, results_count],
outputs=output
)
return interface
def _create_vector_search_interface(self):
"""Create the vector search interface (deprecated)"""
with gr.Blocks() as interface:
gr.Markdown("# Vector Search (deprecated)")
with gr.Row():
with gr.Column():
vector_input = gr.Textbox()
threshold = gr.Slider(
label="threshold",
minimum=0.0,
maximum=1.0,
value=self.default_threshold
)
results_count = gr.Slider(
label="results",
minimum=0,
maximum=50,
value=3,
step=1
)
search_btn = gr.Button("Search")
with gr.Column():
output = gr.JSON(label="Results")
search_btn.click(
fn=self.vector_search,
inputs=[vector_input, threshold, results_count],
outputs=output
)
return interface
def _create_faces_in_sprite_interface(self):
"""Create the faces in sprite interface"""
with gr.Blocks() as interface:
gr.Markdown("# Find Faces in Sprite")
with gr.Row():
with gr.Column():
img_input = gr.Image()
vtt_input = gr.Textbox(label="VTT file")
search_btn = gr.Button("Process")
with gr.Column():
output = gr.JSON(label="Results")
search_btn.click(
fn=find_faces_in_sprite,
inputs=[img_input, vtt_input],
outputs=output
)
return interface
def launch(self, server_name="0.0.0.0", server_port=7860, share=True):
"""Launch the web interface"""
with gr.Blocks() as demo:
with gr.Tabs() as tabs:
with gr.TabItem("Single Face Search"):
self._create_image_search_interface()
with gr.TabItem("Multiple Face Search"):
self._create_multiple_image_search_interface()
with gr.TabItem("Vector Search"):
self._create_vector_search_interface()
with gr.TabItem("Faces in Sprite"):
self._create_faces_in_sprite_interface()
        demo.queue().launch(server_name=server_name, server_port=server_port, share=share, ssr_mode=False)


@@ -1,12 +0,0 @@
name: PythonDepManager
description: Manage Python dependencies for CommunityScripts
version: 0.1.0
url: https://github.com/stashapp/CommunityScripts/
exec:
- python
- "{pluginDir}/flush.py"
interface: raw
tasks:
- name: "Flush Dependencies"
description: Flush all cached dependencies


@@ -1,121 +0,0 @@
# PythonDepManager
https://discourse.stashapp.cc/t/pythondepmanager/1801
Python dependency management system for CommunityScripts plugins.
This plugin provides an easy way to install and manage Python package dependencies in your plugins without manual user interaction.
Don't worry about missing dependencies and wrong or conflicting versions anymore.
## Features
- 🚀 Automatic dependency installation and management
- Users won't have to manually install dependencies
- 🔒 Isolated dependency versions
- Specify exact version of your dependencies without worrying about conflicts with other plugin installs
- 📦 Support for multiple package sources:
- PyPI packages with version constraints
- Git repositories (with branch/tag/commit support)
- Custom import names for metapackages
- 🔄 Automatic version resolution and compatibility checking
- 🧹 Easy dependency cleanup and flushing
## Installation
1. Add PythonDepManager as a requirement in your plugin's YAML file:
```yaml
name: YourPlugin
# requires: PythonDepManager
description: Your plugin description
```
## Usage
### Basic Usage
In your plugin's Python code, import and use the dependency manager:
```python
from PythonDepManager import ensure_import
# Install and import a package with specific version
ensure_import("requests==2.26.0")
# Afterwards imports will use only the requested versions
import requests
```
### Advanced Usage
#### Minimum Versions
Define a minimum version. The manager reuses any cached version that satisfies the constraint, or installs the latest release if none does.
```python
from PythonDepManager import ensure_import
ensure_import("requests>=2.26.0")
```
#### Custom Import Names/Meta Packages
Use custom import names for packages with different import names or meta packages
```python
from PythonDepManager import ensure_import
# Install beautifulsoup4 but import as bs4
ensure_import("bs4:beautifulsoup4==4.9.3")
```
```python
from PythonDepManager import ensure_import
# Install stashapp-tools but import as stashapi
ensure_import("stashapi:stashapp-tools==0.2.58")
```
#### Git Repository Dependencies
Install packages directly from Git repositories:
```python
from PythonDepManager import ensure_import
# Install from a Git repository
ensure_import("stashapi@git+https://github.com/user/repo.git")
# Install specific branch/tag
ensure_import("stashapi@git+https://github.com/user/repo.git@main")
# Install specific commit
ensure_import("stashapi@git+https://github.com/user/repo.git@ad483dc")
```
### Multiple Imports
Handle multiple different requirements for imports:
```python
from PythonDepManager import ensure_import
ensure_import(
"requests",
"bs4:beautifulsoup4==4.9.3",
"stashapi:stashapp-tools==0.2.58",
"someothermodule>=0.1",
)
```
### Managing Dependencies
To flush all cached dependencies:
```python
from PythonDepManager import flush_dependencies
flush_dependencies()
```
## Requirements
- Git (for Git repository dependencies)
- pip (Python package installer)
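
For context, a sketch of the on-disk layout `ensure_import` produces; folder names follow `get_folder_name` in `deps.py`, and the plugin name is illustrative:

```python
# plugins/
#   MyPlugin/
#     myplugin.py            # calls ensure_import(...)
#   py_dependencies/         # shared across plugins, one folder per pinned package
#     requests_2.26.0/       # from "requests==2.26.0"
#     bs4_4.9.3/             # from "bs4:beautifulsoup4==4.9.3"
#     stashapi_git_ad483dc/  # from "stashapi@git+https://github.com/user/repo.git@ad483dc"
```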


@@ -1,3 +0,0 @@
from .deps import ensure_import
__all__ = ["ensure_import"]


@@ -1,540 +0,0 @@
"""
🐍 Simple dependency management for Python projects.
Automatically installs and manages dependencies in isolated folders.
Supports regular packages, git repositories, and version constraints.
Usage:
Add a dependency to PythonDepManager into your plugin.yml file so it gets installed automatically:
#requires: PythonDepManager
Then, in your python code, you can use the "ensure_import" function to install and manage dependencies:
# Example usage:
from PythonDepManager import ensure_import
ensure_import("requests==2.26.0") # Specific version
ensure_import("requests>=2.25.0") # Minimum version
ensure_import("bs4:beautifulsoup4==4.9.3") # Custom import name/Metapackage Imports
ensure_import("stashapi@git+https://github.com/user/repo.git") # Git repo
ensure_import("stashapi@git+https://github.com/user/repo.git@main") # Git branch/tag
ensure_import("stashapi@git+https://github.com/user/repo.git@abc123") # Git commit
ensure_import("bs4:beautifulsoup4==4.9.3", "requests==2.26.0") # Multiple packages
# If you want to flush all dependencies, you can use the flush_dependencies function:
from PythonDepManager import flush_dependencies
flush_dependencies()
"""
import sys
import subprocess
import re
import importlib
import importlib.metadata
import hashlib
import os
from pathlib import Path
from inspect import stack
from typing import Optional, List, Set, Tuple
from dataclasses import dataclass
from PythonDepManager import log
@dataclass(frozen=True)
class PackageInfo:
"""Immutable representation of a package specification."""
import_name: str
pip_name: str
version: Optional[str] = None
min_version: Optional[str] = None
git_url: Optional[str] = None
git_ref: Optional[str] = None
@property
def is_git(self) -> bool:
return self.git_url is not None
@property
def is_min_version(self) -> bool:
return self.min_version is not None
def __str__(self) -> str:
if self.is_git:
ref = f"@{self.git_ref}" if self.git_ref else ""
return f"{self.import_name} (git{ref})"
elif self.is_min_version:
return f"{self.pip_name}>={self.min_version}"
elif self.version:
return f"{self.pip_name}=={self.version}"
else:
return self.pip_name
def check_system_requirements() -> None:
"""Ensure git and pip are available."""
for cmd, name in [
(["git", "--version"], "git"),
([sys.executable, "-m", "pip", "--version"], "pip"),
]:
try:
subprocess.run(
cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True
)
except (FileNotFoundError, subprocess.CalledProcessError):
log.throw(f"PythonDepManager: ❌ {name} is required but not available")
def run_git_command(args: List[str]) -> Optional[str]:
"""Run a git command and return the first 7 characters of output."""
try:
result = subprocess.run(
["git"] + args, capture_output=True, text=True, timeout=10, check=True
)
return result.stdout.split()[0][:7] if result.stdout.strip() else None
except (
subprocess.TimeoutExpired,
subprocess.CalledProcessError,
FileNotFoundError,
IndexError,
):
return None
def parse_package_spec(spec: str) -> PackageInfo:
"""Parse a package specification into structured information."""
# Split custom import name from package spec
if ":" in spec and not spec.startswith("git+") and "@git+" not in spec:
import_name, package_spec = spec.split(":", 1)
else:
import_name, package_spec = "", spec
# Handle git packages
if "@git+" in package_spec:
import_name = import_name or package_spec.split("@")[0]
git_url = package_spec.split("@git+", 1)[1]
if "@" in git_url:
git_url, git_ref = git_url.rsplit("@", 1)
else:
git_ref = None
return PackageInfo(
import_name=import_name,
pip_name="",
git_url=f"git+{git_url}",
git_ref=git_ref,
)
# Handle version constraints
if ">=" in package_spec:
match = re.match(r"^([^>=]+)>=(.+)$", package_spec)
if not match:
log.throw(
f"PythonDepManager: ❌ Invalid version constraint: {package_spec}"
)
pip_name, min_version = match.groups()
return PackageInfo(
import_name=import_name or pip_name,
pip_name=pip_name,
min_version=min_version,
)
# Handle exact version or no version
match = re.match(r"^([^=@]+)(?:==(.+))?$", package_spec)
if not match:
log.throw(f"PythonDepManager: ❌ Invalid package specification: {package_spec}")
pip_name, version = match.groups()
return PackageInfo(
import_name=import_name or pip_name, pip_name=pip_name, version=version
)
def compare_versions(v1: str, v2: str) -> int:
"""Compare version strings. Returns -1, 0, or 1."""
try:
# Try using packaging library if available
from packaging import version
ver1, ver2 = version.parse(v1), version.parse(v2)
return -1 if ver1 < ver2 else (1 if ver1 > ver2 else 0)
except ImportError:
# Fallback to simple numeric comparison
try:
def normalize(v: str) -> List[int]:
return [int(x) for x in v.split(".")]
parts1, parts2 = normalize(v1), normalize(v2)
max_len = max(len(parts1), len(parts2))
parts1.extend([0] * (max_len - len(parts1)))
parts2.extend([0] * (max_len - len(parts2)))
for a, b in zip(parts1, parts2):
if a < b:
return -1
if a > b:
return 1
return 0
except ValueError:
return -1 if v1 < v2 else (1 if v1 > v2 else 0)
def find_compatible_version(pkg: PackageInfo, base_folder: Path) -> Optional[str]:
"""Find the best compatible version already installed."""
if not pkg.is_min_version or not base_folder.exists():
return None
compatible_versions = []
prefix = f"{pkg.import_name}_"
for folder in base_folder.iterdir():
if not (folder.is_dir() and folder.name.startswith(prefix)):
continue
version_part = folder.name[len(prefix) :]
if version_part and "git_" not in version_part:
try:
if compare_versions(version_part, pkg.min_version) >= 0:
compatible_versions.append(version_part)
except ValueError:
continue
if compatible_versions:
try:
return max(
compatible_versions, key=lambda v: [int(x) for x in v.split(".")]
)
except ValueError:
return max(compatible_versions)
return None
def get_git_commit_hash(git_url: str, ref: Optional[str] = None) -> Optional[str]:
"""Get commit hash from git remote."""
clean_url = git_url[4:] if git_url.startswith("git+") else git_url
clean_url = clean_url.split("@")[0]
if ref:
for ref_type in ["heads", "tags"]:
result = run_git_command(["ls-remote", clean_url, f"refs/{ref_type}/{ref}"])
if result:
return result
else:
return run_git_command(["ls-remote", clean_url, "HEAD"])
return None
def get_folder_name(pkg: PackageInfo, base_folder: Path) -> str:
"""Generate folder name for package installation."""
if pkg.is_git:
if pkg.git_ref and re.match(r"^[a-f0-9]{7,40}$", pkg.git_ref):
commit_hash = pkg.git_ref[:7]
else:
commit_hash = get_git_commit_hash(pkg.git_url, pkg.git_ref)
if commit_hash:
return f"{pkg.import_name}_git_{commit_hash}"
else:
url_hash = hashlib.md5(pkg.git_url.encode()).hexdigest()[:7]
return f"{pkg.import_name}_git_{url_hash}"
elif pkg.is_min_version:
compatible_version = find_compatible_version(pkg, base_folder)
return (
f"{pkg.import_name}_{compatible_version}"
if compatible_version
else f"{pkg.import_name}_latest"
)
else:
return f"{pkg.import_name}_{pkg.version}" if pkg.version else pkg.import_name
def get_base_folder() -> Path:
"""Get the base folder for automatic dependencies."""
caller_file = stack()[2].filename
if caller_file.startswith("<") or not caller_file:
log.throw(
"PythonDepManager: ❌ Cannot determine caller location", e_type=RuntimeError
)
caller_path = Path(caller_file).resolve()
deps_folder = caller_path.parent.parent / "py_dependencies"
try:
deps_folder.mkdir(parents=True, exist_ok=True)
# Test write permissions
test_file = deps_folder / ".write_test"
test_file.touch()
test_file.unlink()
except (OSError, PermissionError) as e:
log.throw(
f"PythonDepManager: ❌ Cannot access dependencies folder '{deps_folder}': {e}",
e_type=RuntimeError,
e_from=e,
)
return deps_folder
def is_package_available(pkg: PackageInfo, base_folder: Path) -> bool:
"""Check if managed package is already available and satisfies requirements."""
# For ensure_import, we only care about our managed dependencies
# System-installed packages are ignored to ensure we use the managed version
folder = base_folder / get_folder_name(pkg, base_folder)
if not folder.exists():
return False
# Check if the managed package folder is in sys.path
folder_str = os.path.normpath(str(folder.resolve()))
if folder_str not in sys.path:
return False
# Try importing from the managed location
try:
# Temporarily prioritize our managed path
original_path = sys.path[:]
sys.path.insert(0, folder_str)
# Clear any existing module to force reload from managed location
if pkg.import_name in sys.modules:
del sys.modules[pkg.import_name]
# Clear import caches
importlib.invalidate_caches()
# Try importing
importlib.import_module(pkg.import_name)
# Restore original path order (our managed paths should already be at front)
sys.path[:] = original_path
if folder_str not in sys.path:
sys.path.insert(0, folder_str)
return True
except ImportError:
# Restore original path
sys.path[:] = original_path
if folder_str not in sys.path:
sys.path.insert(0, folder_str)
return False
def get_install_spec(pkg: PackageInfo) -> str:
"""Get the pip install specification for a package."""
if pkg.is_git:
return f"{pkg.git_url}@{pkg.git_ref}" if pkg.git_ref else pkg.git_url
return f"{pkg.pip_name}=={pkg.version}" if pkg.version else pkg.pip_name
def install_package(pkg: PackageInfo, folder: Path) -> None:
"""Install package to specified folder."""
folder.mkdir(parents=True, exist_ok=True)
install_spec = get_install_spec(pkg)
subprocess.run(
[
sys.executable,
"-m",
"pip",
"install",
"--no-input",
"--upgrade",
"--force-reinstall",
"--quiet",
f"--target={folder.resolve()}",
install_spec,
],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
check=True,
)
def add_to_path(folder: Path, current_paths: Set[str]) -> None:
"""Add folder to front of sys.path to ensure managed dependencies are prioritized."""
folder_str = os.path.normpath(str(folder.resolve()))
# Remove from current position if it exists to avoid duplicates
if folder_str in sys.path:
sys.path.remove(folder_str)
current_paths.discard(folder_str)
# Always add to front to ensure priority over system packages
sys.path.insert(0, folder_str)
current_paths.add(folder_str)
def clear_import_caches() -> None:
"""Clear Python import caches."""
importlib.invalidate_caches()
try:
importlib.metadata._cache.clear()
except AttributeError:
pass
def remove_existing_modules(packages: List[PackageInfo]) -> None:
"""Remove existing modules for managed packages to ensure we use managed versions."""
for pkg in packages:
# Remove the main module
if pkg.import_name in sys.modules:
log.debug(
f"PythonDepManager: 🔄 Removing existing module '{pkg.import_name}' to use managed version"
)
del sys.modules[pkg.import_name]
# Also remove any submodules that might be cached
modules_to_remove = []
for module_name in sys.modules:
if module_name.startswith(f"{pkg.import_name}."):
modules_to_remove.append(module_name)
for module_name in modules_to_remove:
log.debug(
f"PythonDepManager: 🔄 Removing existing submodule '{module_name}' to use managed version"
)
del sys.modules[module_name]
def process_packages(deps: Tuple[str, ...]) -> Tuple[List[PackageInfo], Path]:
"""Parse dependencies and prepare base folder."""
check_system_requirements()
base_folder = get_base_folder()
packages = []
for dep in deps:
try:
packages.append(parse_package_spec(dep))
except ValueError as e:
log.throw(
f"PythonDepManager: ❌ Invalid package spec '{dep}': {e}",
e_type=ValueError,
e_from=e,
)
if not packages:
log.throw(
"PythonDepManager: ❌ No valid package specifications found",
e_type=ValueError,
)
return packages, base_folder
def setup_existing_packages(packages: List[PackageInfo], base_folder: Path) -> Set[str]:
"""Add existing package folders to sys.path and ensure managed packages are prioritized."""
# First, remove any existing modules for packages we're managing
# This ensures we use the managed version instead of system-installed ones
remove_existing_modules(packages)
current_paths = set(sys.path)
managed_paths = []
for pkg in packages:
folder = base_folder / get_folder_name(pkg, base_folder)
if folder.exists():
folder_str = os.path.normpath(str(folder.resolve()))
managed_paths.append(folder_str)
# Remove from current position if it exists
if folder_str in sys.path:
sys.path.remove(folder_str)
current_paths.discard(folder_str)
# Add all managed paths to the front of sys.path to ensure priority
for folder_str in reversed(managed_paths): # Reverse to maintain order
sys.path.insert(0, folder_str)
current_paths.add(folder_str)
clear_import_caches()
return current_paths
def install_missing_packages(
packages: List[PackageInfo], base_folder: Path, current_paths: Set[str]
) -> None:
"""Install packages that aren't already satisfied."""
# Handle minimum version packages by finding compatible versions
resolved_packages = []
for pkg in packages:
if pkg.is_min_version:
compatible_version = find_compatible_version(pkg, base_folder)
if compatible_version:
# Create new package info with resolved version
resolved_pkg = PackageInfo(
import_name=pkg.import_name,
pip_name=pkg.pip_name,
version=compatible_version,
)
resolved_packages.append(resolved_pkg)
else:
resolved_packages.append(pkg)
else:
resolved_packages.append(pkg)
to_install = [
pkg for pkg in resolved_packages if not is_package_available(pkg, base_folder)
]
if not to_install:
log.debug("PythonDepManger: ✅ All dependencies satisfied")
return
# Remove existing modules for packages we're about to install
# This ensures we use the newly installed managed version
remove_existing_modules(to_install)
for pkg in to_install:
folder_name = get_folder_name(pkg, base_folder)
folder = base_folder / folder_name
log.info(f"PythonDepManager: 📦 Installing {pkg}{folder_name}")
try:
install_package(pkg, folder)
add_to_path(folder, current_paths)
log.info(f"PythonDepManager: ✅ Successfully installed {pkg.import_name}")
except Exception as e:
log.throw(
f"PythonDepManager: ❌ Failed to install {pkg.import_name}: {e}",
e_type=RuntimeError,
e_from=e,
)
clear_import_caches()
def ensure_import(*deps: str) -> None:
"""
🎯 Install and import dependencies automatically.
⚠️ IMPORTANT: This function always prioritizes managed dependencies over system-installed ones.
When you use ensure_import, any existing system-installed versions of the specified packages
will be ignored in favor of the managed versions in the py_dependencies folder.
Supported formats:
• Regular: "requests", "requests==2.26.0"
• Version ranges: "requests>=2.25.0"
• Custom import name/Metapackage Imports: "bs4:beautifulsoup4==4.9.3"
• Git: "stashapi@git+https://github.com/user/repo.git"
• Git with ref: "stashapi@git+https://github.com/user/repo.git@main"
"""
if not deps:
return
try:
packages, base_folder = process_packages(deps)
current_paths = setup_existing_packages(packages, base_folder)
install_missing_packages(packages, base_folder, current_paths)
# Final cache clear to ensure all imports use managed versions
clear_import_caches()
except (RuntimeError, ValueError) as e:
log.throw(f"PythonDepManager: ❌ {e}", e_type=RuntimeError, e_from=e)


@@ -1,15 +0,0 @@
import log
import shutil
from deps import get_base_folder
def flush_dependencies() -> None:
"""Delete all dependencies in the base folder"""
    # resolve the shared py_dependencies folder
    deps_folder = get_base_folder()
    log.info(f"Flushing dependencies from {deps_folder}")
    shutil.rmtree(deps_folder)
if __name__ == "__main__":
flush_dependencies()


@@ -1,30 +0,0 @@
import sys
import re
from functools import partial
def _log(level_char: str, s):
lvl_char = "\x01{}\x02".format(level_char)
s = re.sub(r"data:.+?;base64[^'\"]+", "[...]", str(s))
for line in s.splitlines():
print(lvl_char, line, file=sys.stderr, flush=True)
trace = partial(_log, "t")
debug = partial(_log, "d")
info = partial(_log, "i")
warning = partial(_log, "w")
error = partial(_log, "e")
def throw(s, e_type=None, e_from=None):
error(s)
if e_type and e_from:
raise e_type(s) from e_from
elif e_type and not e_from:
raise e_type(s)
elif not e_type and e_from:
raise Exception(s) from e_from
else:
raise Exception(s)
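
For context, the `\x01…\x02` wrapper is the level prefix Stash parses from a plugin's stderr; a quick illustration, assuming the module is imported as in `deps.py`:

```python
from PythonDepManager import log

log.info("model loaded")   # writes "\x01i\x02 model loaded" to stderr, shown as INFO in Stash
log.debug("cache hit")     # writes "\x01d\x02 cache hit", shown as DEBUG
```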


@@ -1,15 +0,0 @@
id: PythonDepManager
name: PythonDepManager
metadata:
description: Manage Python dependencies for CommunityScripts
version: 0.1.0-4ccbf2f
date: "2025-05-31 14:07:39"
requires: []
source_repository: https://stashapp.github.io/CommunityScripts/stable/index.yml
files:
- __init__.py
- README.md
- flush.py
- log.py
- deps.py
- PythonDepManager.yml


@@ -1,6 +0,0 @@
name: Stash Userscript Library
description: Exports utility functions and a Stash class that emits events whenever a GQL response is received and whenever a page navigation change is detected
version: 1.0
ui:
javascript:
- stashUserscriptLibrary.js


@@ -1,12 +0,0 @@
id: StashUserscriptLibrary
name: Stash Userscript Library
metadata:
description: Exports utility functions and a Stash class that emits events whenever
  a GQL response is received and whenever a page navigation change is detected
version: 1.0-abb9372
date: "2024-03-10 21:48:44"
requires: []
source_repository: https://stashapp.github.io/CommunityScripts/stable/index.yml
files:
- StashUserscriptLibrary.yml
- stashUserscriptLibrary.js


@@ -1,22 +0,0 @@
# CJ's Card Tweaks
https://discourse.stashapp.cc/t/cjs-card-tweaks/1342
This plugin contains the various tweaks I've made to my Stash cards for anyone who may be interested. Each tweak is toggleable, so users can enable some without enabling all of them.
## Tweaks
### File Count on Cards
![Screenshot 2024-07-24 173921](https://github.com/user-attachments/assets/8eaf0dce-a6c2-4d92-aa78-7ddc2322392a)
Scenes, Galleries, or Images with more than one associated file get a badge similar to the one shown in the file count tab. The badge sits at the top right of the card, where the studio logo used to live. At the moment, the CSS that relocates the studio logo to the left of the title card is not included as a toggleable tweak, but I plan to extract it from my SCSS theme project and incorporate it here. Until then, users who aren't running any other plugins that reposition the studio logo can tweak the CSS to move the file count elsewhere.
### 3D rating banner
![Screenshot 2024-07-29 131937](https://github.com/user-attachments/assets/64d03cd7-6e31-4373-b831-e99a942216cf)
Adds an additional dimension to the rating banners.
### Performer profile cards
![unnamed_2](https://github.com/user-attachments/assets/f505417d-ed0c-40c4-9c78-647081a41307)
Modifies performer cards to use a traditional profile design.


@@ -1,272 +0,0 @@
(async () => {
"use strict";
const userSettings = await csLib.getConfiguration("cjCardTweaks", {});
  const SETTINGS = parseSettings(userSettings ?? {});
const CARD_KEYS = {
galleries: "gallery",
images: "image",
movies: "movie",
performers: "performer",
scenes: "scene",
studios: "studio",
};
const CARDS = Object.entries(CARD_KEYS).reduce((acc, [plural, singular]) => {
acc[singular] = {
class: `${singular}-card`,
data: stash[plural],
isContentCard: ["scene", "gallery", "image"].includes(singular),
};
return acc;
}, {});
function parseSettings(settings) {
return Object.keys(settings).reduce((acc, key) => {
if (
key === "fileCount" ||
key === "addBannerDimension" ||
key === "performerProfileCards"
) {
acc[key] = settings[key];
} else {
// does nothing for now
}
return acc;
}, {});
}
const FILE_COUNT_STYLE =
"span.file-count.badge.badge-pill.badge-info{position: absolute;top: 0.3rem;right: 0.5rem;border-radius: 50%;width: 1.7rem;height: 1.7rem;padding: 5px 8px;font-size: 100%;box-shadow: 1px 3px 4px rgba(0, 0, 0, 0.5)}.grid-card:hover .file-count.badge{opacity: 0;transition: opacity 0.5s}";
const PERFORMER_PROFILE_CARD_STYLE =
".performer-card:hover img.performer-card-image{box-shadow: 0 0 0 rgb(0 0 0 / 20%), 0 0 6px rgb(0 0 0 / 90%);transition: box-shadow .5s .5s}@media (min-width: 1691px){.performer-recommendations .card .performer-card-image{height: unset}}button.btn.favorite-button.not-favorite,button.btn.favorite-button.favorite{transition: filter .5s .5s}.performer-card:hover .thumbnail-section button.btn.favorite-button.not-favorite, .performer-card:hover .thumbnail-section button.btn.favorite-button.favorite{filter: drop-shadow(0 0 2px rgba(0, 0, 0, .9))}.performer-card .thumbnail-section button.btn.favorite-button.not-favorite, .performer-card .thumbnail-section button.btn.favorite-button.favorite{top: 10px;filter: drop-shadow(0 2px 2px rgba(0, 0, 0, .9))}.item-list-container .performer-card__age,.recommendation-row .performer-card__age,.item-list-container .performer-card .card-section-title,.recommendation-row .performer-card .card-section-title,.item-list-container .performer-card .thumbnail-section,.recommendation-row .performer-card .thumbnail-section{display: flex;align-content: center;justify-content: center}.item-list-container .performer-card .thumbnail-section a,.recommendation-row .performer-card .thumbnail-section a{display: contents}.item-list-container .performer-card-image,.recommendation-row .performer-card-image{aspect-ratio: 1 / 1;display: flex;object-fit: cover;border: 3px solid var(--plex-yelow);border-radius: 50%;min-width: unset;position: relative;width: 58%;margin: auto;z-index: 1;margin-top: 1.5rem;box-shadow:0 13px 26px rgb(0 0 0 / 20%),0 3px 6px rgb(0 0 0 / 90%);object-position: center;transition: box-shadow .5s .5s}.item-list-container .performer-card hr,.recommendation-row .performer-card hr{width: 90%}.item-list-container .performer-card .fi,.recommendation-row .performer-card .fi{position: absolute;top: 81.5%;left: 69%;border-radius: 50% !important;background-size: cover;margin-left: -1px;height: 1.5rem;width: 1.5rem;z-index: 10;border: solid 2px #252525;box-shadow: unset}.item-list-container .performer-card .card-popovers .btn,.recommendation-row .performer-card .card-popovers .btn{font-size: 0.9rem}";
const RATING_BANNER_3D_STYLE =
".grid-card{overflow:unset}.detail-group .rating-banner-3d,.rating-banner{display:none}.grid-card:hover .rating-banner-3d{opacity:0;transition:opacity .5s}.rating-banner-3d{height:110px;left:-6px;overflow:hidden;position:absolute;top:-6px;width:110px}.rating-banner-3d span{box-shadow:0 5px 4px rgb(0 0 0 / 50%);position:absolute;display:block;width:170px;padding:10px 5px 10px 0;background-color:#ff6a07;color:#fff;font:700 1rem/1 Lato,sans-serif;text-shadow:0 1px 1px rgba(0,0,0,.2);text-transform:uppercase;text-align:center;letter-spacing:1px;right:-20px;top:24px;transform:rotate(-45deg)}.rating-banner-3d::before{top:0;right:0;position:absolute;z-index:-1;content:'';display:block;border:5px solid #a34405;border-top-color:transparent;border-left-color:transparent}.rating-banner-3d::after{bottom:0;left:0;position:absolute;z-index:-1;content:'';display:block;border:5px solid #963e04}";
/**
* Element to inject custom CSS styles.
*/
const styleElement = document.createElement("style");
document.head.appendChild(styleElement);
if (SETTINGS.fileCount) styleElement.innerHTML += FILE_COUNT_STYLE;
if (SETTINGS.addBannerDimension)
styleElement.innerHTML += RATING_BANNER_3D_STYLE;
if (SETTINGS.performerProfileCards)
styleElement.innerHTML += PERFORMER_PROFILE_CARD_STYLE;
function createElementFromHTML(htmlString) {
const div = document.createElement("div");
div.innerHTML = htmlString.trim();
return div.firstChild;
}
// Mapping of configuration keys to functions
const cardsHandlers = {
gallery: handleGalleriesCards,
image: handleImagesCards,
movie: handleMoviesCards,
performer: handlePerformersCards,
scene: handleScenesCards,
studio: handleStudiosCards,
};
// Handle home cards
handleHomeHotCards();
for (const [key, card] of Object.entries(CARDS)) {
if (cardsHandlers[key]) {
cardsHandlers[key]();
}
}
/**
* Add cards on home page.
*/
function handleHomeHotCards() {
const pattern = /^(\/)?$/;
registerPathChangeListener(pattern, () => {
setTimeout(() => {
for (const card of Object.values(CARDS)) handleCards(card, true);
}, 3000);
});
}
/**
* Handles gallery cards to specific paths in Stash.
*
* The supported paths are:
* - /galleries
* - /performers/{id}/galleries
* - /studios/{id}/galleries
* - /tags/{id}/galleries
* - /scenes/{id}
*/
function handleGalleriesCards() {
const pattern =
/^\/(galleries|(performers|studios|tags)\/\d+\/galleries|scenes\/\d+)$/;
tweakCards(pattern, CARDS.gallery);
}
/**
* Handles image cards to specific paths in Stash.
*
* The supported paths are:
* - /images
* - /performers/{id}/images
* - /studios/{id}/images
* - /tags/{id}/images
* - /galleries/{id}
*/
function handleImagesCards() {
const pattern =
/^\/(images|(performers|studios|tags)\/\d+\/images|galleries\/\d+)$/;
tweakCards(pattern, CARDS.image);
}
/**
* Handles movie cards to specific paths in Stash.
*
* The supported paths are:
* - /movies
* - /performers/{id}/movies
* - /studios/{id}/movies
* - /tags/{id}/movies
* - /scenes/{id}
*/
function handleMoviesCards() {
const pattern =
/^\/(movies|(performers|studios|tags)\/\d+\/movies|scenes\/\d+)$/;
tweakCards(pattern, CARDS.movie);
}
/**
* Handles performer cards to specific paths in Stash.
*
* The supported paths are:
* - /performers
* - /performers/{id}/appearswith
* - /studios/{id}/performers
* - /tags/{id}/performers
* - /scenes/{id}
* - /galleries/{id}
* - /images/{id}
*/
function handlePerformersCards() {
const pattern =
/^\/(performers(?:\/\d+\/appearswith)?|(performers|studios|tags)\/\d+\/performers|(scenes|galleries|images)\/\d+)$/;
tweakCards(pattern, CARDS.performer);
}
/**
* Handles scene cards to specific paths in Stash.
*
* The supported paths are:
* - /scenes
* - /performers/{id}/scenes
* - /studios/{id}/scenes
* - /tags/{id}/scenes
* - /movies/{id}
* - /galleries/{id}
*/
function handleScenesCards() {
const pattern =
/^\/(scenes|(performers|studios|tags|movies)\/\d+\/scenes|(movies|galleries)\/\d+)$/;
tweakCards(pattern, CARDS.scene);
}
/**
* Handles studio cards to specific paths in Stash.
*
* The supported paths are:
* - /studios
* - /studios/{id}/childstudios
* - /tags/{id}/studios
*/
function handleStudiosCards() {
const pattern =
/^\/(studios|(studios\/\d+\/childstudios)|(tags\/\d+\/studios))$/;
tweakCards(pattern, CARDS.studio);
}
function tweakCards(pattern, card) {
registerPathChangeListener(pattern, () => {
handleCards(card);
});
}
function handleCards(card, isHome = false) {
waitForClass(card.class, () => {
executeTweaks(card.data, card.class, card.isContentCard);
});
}
function executeTweaks(stashData, cardClass, isContentCard) {
const cards = document.querySelectorAll(`.${cardClass}`);
cards.forEach((card) => {
maybeAddFileCount(card, stashData, isContentCard);
maybeAddDimensionToBanner(card);
});
}
/**
* Add badge with file count on cards with more than 1 associated file
*
* @param {Element} card - Card element cards list.
* @param {Object} stashData - Data fetched from the GraphQL interceptor. e.g. stash.performers.
* @param {boolean} isContentCard - Flag indicating if card is a content card.
*/
function maybeAddFileCount(card, stashData, isContentCard) {
if (!SETTINGS.fileCount || !isContentCard) return;
// verify this function was not run twice on the same card for some strange reason
const fileCountBadge = card.querySelector(".file-count");
if (fileCountBadge) return;
const link = card.querySelector(".thumbnail-section > a");
const id = new URL(link.href).pathname.split("/").pop();
const data = stashData[id];
if (!data || data.files.length <= 1) return;
    const el = createElementFromHTML(
      `<span class="file-count badge badge-pill badge-info">${data.files.length}</span>`
    );
link.parentElement.appendChild(el);
}
/**
   * Add an additional dimension to the rating banner
*
* @param {Element} card - Card element cards list.
*/
function maybeAddDimensionToBanner(card) {
if (!SETTINGS.addBannerDimension) return;
const oldBanner = card.querySelector(".rating-banner");
if (!oldBanner) return;
const link = card.querySelector(".thumbnail-section > a");
const rating = oldBanner.textContent;
const color = window.getComputedStyle(oldBanner).backgroundColor;
const colorClass =
oldBanner.className.replace("rating-banner", "").trim() + "-3d";
if (!styleElement.innerHTML.includes(colorClass)) {
styleElement.innerHTML += `.${colorClass} span {background-color: ${color};}`;
styleElement.innerHTML += `.rating-banner-3d.${colorClass}:before {border: 5px solid ${color}; filter: brightness(0.9);}`;
styleElement.innerHTML += `.rating-banner-3d.${colorClass}:after {border: 5px solid ${color}; filter: brightness(0.9);}`;
}
const el = createElementFromHTML(
`<div class="rating-banner-3d ${colorClass}"><span>${rating}</span></div>`
);
const span = el.querySelector("span");
span.style.backgroundColor = color;
link.parentElement.appendChild(el);
oldBanner.remove();
}
})();


@@ -1,26 +0,0 @@
name: CJ's Card Tweaks.
description: Provides various tweaks for the Stash Cards.
version: 1.1
# requires: CommunityScriptsUILibrary
ui:
requires:
- CommunityScriptsUILibrary
javascript:
- https://cdn.jsdelivr.net/gh/HandyRandyx/stash-plugins@main/utils/fetchInterceptor.js
- https://cdn.jsdelivr.net/gh/HandyRandyx/stash-plugins@main/utils/stashHandler.js
- https://cdn.jsdelivr.net/gh/HandyRandyx/stash-plugins@main/utils/registerPathChangeListener.js
- https://cdn.jsdelivr.net/gh/cj12312021/stash-plugins@main/utils/waitForClass.js
- cjCardTweaks.js
settings:
addBannerDimension:
displayName: 3D rating banner
description: "Adds additional dimension to the rating banners."
type: BOOLEAN
fileCount:
displayName: Enable for file count
description: "Displays file count on scene, gallery, and image cards."
type: BOOLEAN
performerProfileCards:
displayName: Performer profile cards
description: "Tweaks performer cards to use a traditional profile design."
type: BOOLEAN


@@ -1,12 +0,0 @@
id: cjCardTweaks
name: CJ's Card Tweaks.
metadata:
description: Provides various tweaks for the Stash Cards.
version: 1.1-034a8a7
date: "2025-05-18 19:37:19"
requires: []
source_repository: https://stashapp.github.io/CommunityScripts/stable/index.yml
files:
- README.md
- cjCardTweaks.yml
- cjCardTweaks.js


@@ -1,285 +0,0 @@
[250916 11:01:37] [LN:79] INF:
Starting (__file__=/root/.stash/plugins/community/filemonitor/filemonitor.py) (stash.CALLED_AS_STASH_PLUGIN=True) (stash.DEBUG_TRACING=False) (stash.DRY_RUN=False) (stash.PLUGIN_TASK_NAME=getFileMonitorRunningStatus)************************************************
[250916 11:01:37] [LN:193] INF: This Stash instance GQL = http://localhost:9999
[250916 11:01:37] [LN:1001] INF: FileMonitor is NOT running!!!
[250916 11:01:37] [LN:1002] INF: getFileMonitorRunningStatus complete
[... 55 near-identical getFileMonitorRunningStatus checks from 250916 11:05:37 through 251006 10:29:33 omitted; every run logged "FileMonitor is NOT running!!!" ...]
[251006 10:34:01] [LN:79] INF:
Starting (__file__=/root/.stash/plugins/community/filemonitor/filemonitor.py) (stash.CALLED_AS_STASH_PLUGIN=True) (stash.DEBUG_TRACING=False) (stash.DRY_RUN=False) (stash.PLUGIN_TASK_NAME=getFileMonitorRunningStatus)************************************************
[251006 10:34:01] [LN:193] INF: This Stash instance GQL = http://localhost:9999
[251006 10:34:01] [LN:1001] INF: FileMonitor is NOT running!!!
[251006 10:34:01] [LN:1002] INF: getFileMonitorRunningStatus complete

View File

@@ -1,3 +0,0 @@
# Filename parser
https://discourse.stashapp.cc/t/filename-parser/1378
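The hook fires on scene and gallery creation, strips container/resolution noise from the new file's name, then tries to match a studio, a date, and performers that already exist in your library; whatever is left over becomes the title. A rough illustration (hypothetical filename; the studio and performer must already exist in Stash for their IDs to resolve):
```
Studio.25.01.18.Jane.Doe.My.First.Title.1080p.mp4
  -> studio_id:     <id of "Studio">
     date:          "2025-01-18"
     performer_ids: [<id of "Jane Doe">]
     title:         "My First Title"
```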

View File

@@ -1,427 +0,0 @@
function ok() {
return {
output: "ok",
};
}
function main() {
var hookContext = input.Args.hookContext;
var type = hookContext.type;
var ID = hookContext.ID;
if (!ID) {
return ok();
}
var filenameFetcher;
var saver;
if (type === "Scene.Create.Post") {
filenameFetcher = getSceneFilename;
saver = updateScene;
} else if (type === "Gallery.Create.Post") {
filenameFetcher = getGalleryFilename;
saver = updateGallery;
} else {
return ok();
}
var filename = filenameFetcher(ID);
if (!filename) {
return ok();
}
filename = cleanFilename(filename);
var parseResult = parseFilename(filename);
saver(ID, parseResult);
return ok();
}
function getSceneFilename(sceneID) {
var query =
"\
query findScene($id: ID) {\
findScene(id: $id) {\
files {\
path\
}\
}\
}";
var variables = {
id: sceneID,
};
var result = gql.Do(query, variables);
var findScene = result.findScene;
if (!findScene) {
return null;
}
var path = findScene.files[0].path;
return path.substring(path.lastIndexOf("/") + 1);
}
function updateScene(sceneID, parseResult) {
var query =
"\
mutation SceneUpdate($input: SceneUpdateInput!) {\
sceneUpdate(input: $input) {\
id\
}\
}";
var variables = {
input: parseResult,
};
variables.input.id = sceneID;
gql.Do(query, variables);
}
function getGalleryFilename(galleryID) {
var query =
"\
query findGallery($id: ID!) {\
findGallery(id: $id) {\
files {\
path\
}\
}\
}";
var variables = {
id: galleryID,
};
var result = gql.Do(query, variables);
var findGallery = result.findGallery;
if (!findGallery) {
return null;
}
var path = findGallery.files[0].path;
return path.substring(path.lastIndexOf("/") + 1);
}
function updateGallery(galleryID, parseResult) {
var query =
"\
mutation GalleryUpdate($input: GalleryUpdateInput!) {\
galleryUpdate(input: $input) {\
id\
}\
}";
var variables = {
input: parseResult,
};
variables.input.id = galleryID;
gql.Do(query, variables);
}
function matchNames(parts, name, aliases) {
var names = [name].concat(aliases);
var partRegexes = [];
for (var i = 0; i < parts.length; i++) {
partRegexes[i] = new RegExp("^" + parts[i].toLowerCase() + "[. \\-_]*");
}
var cleanRegex = /[. \-_]/g;
var longestMatch = 0;
for (var i = 0; i < names.length; i++) {
var name = names[i].replace(cleanRegex, "").toLowerCase();
for (var j = 0; j < partRegexes.length; j++) {
if (!partRegexes[j].test(name)) {
break;
}
name = name.replace(partRegexes[j], "");
if (name.length === 0) {
if (j + 1 > longestMatch) {
longestMatch = j + 1;
}
}
}
}
return longestMatch;
}
function cleanFilename(name) {
name = name
// remove imageset-...[rarbg]
.replace(/imageset-[\w\d]+\[rarbg]/i, "")
// replace [...] with just ...
.replace(/\[(.*?)]/g, "$1")
// replace (...) with just ...
.replace(/\((.*?)\)/g, "$1")
// replace {...} with just ...
.replace(/{(.*?)}/g, "$1");
var blockList = [
"mp4",
"mov",
"mkv",
"zip",
"cbz",
"cbr",
"xxx",
"4k",
"4096x2160",
"3840x2160",
"2160p",
"1080p",
"1920x1080",
"60fps",
"30fps",
"repack",
"ktr",
];
var regExp = new RegExp(
"(_|[^\\w\\d]|^)(" + blockList.join("|") + ")(_|[^\\w\\d]|$)",
"i"
);
while (regExp.test(name)) {
name = name.replace(regExp, "$1$3");
}
// If name starts with <...>.com remove the .com (sometimes names include studio name as site/domain)
name = name.replace(/^([\w\d-]+?)\.com/, "$1");
name = name
// Remove everything except letters and digits at the start
.replace(/^(_|[^\w\d])+/, "")
// Remove everything except letters and digits at the end
.replace(/(_|[^\w\d])+$/, "");
return name;
}
function matchStudio(parts, result) {
var query =
"\
query findStudios($studio_filter: StudioFilterType, $filter: FindFilterType!) {\
findStudios(studio_filter: $studio_filter, filter: $filter) {\
studios {\
id\
name\
aliases\
}\
}\
}";
  // search on the first two characters of the first part; if nothing matches
  // and the name starts with "a", the fallback below retries without that character
  var searchTerm = parts[0].substring(0, 2);
var variables = {
filter: {
per_page: -1,
},
studio_filter: {
name: {
modifier: "INCLUDES",
value: searchTerm,
},
OR: {
aliases: {
modifier: "INCLUDES",
value: searchTerm,
},
},
},
};
var queryResult = gql.Do(query, variables);
var studios = queryResult.findStudios.studios;
if (!studios.length && parts[0].substring(0, 1) === "a") {
variables.studio_filter.name.value =
variables.studio_filter.OR.aliases.value = parts[0].substring(1, 3);
queryResult = gql.Do(query, variables);
studios = queryResult.findStudios.studios;
}
var matchingParts = 0;
for (var i = 0; i < studios.length; i++) {
var studio = studios[i];
matchingParts = matchNames(parts, studio.name, studio.aliases);
if (matchingParts === 0) {
continue;
}
result.studio_id = studio.id;
break;
}
return matchingParts;
}
function matchDate(parts, result) {
if (
parts.length < 3 ||
!/^(\d{2}|\d{4})$/.test(parts[0]) ||
!/^\d{2}$/.test(parts[1]) ||
!/^\d{2}$/.test(parts[2])
) {
return 0;
}
var year = parseInt(parts[0], 10);
var month = parseInt(parts[1], 10);
var day = parseInt(parts[2], 10);
if (year < 100) {
year += 2000;
}
if (
year < 2000 ||
year > 2100 ||
month < 1 ||
month > 12 ||
day < 1 ||
day > 31
) {
return 0;
}
result.date =
year +
"-" +
(month < 10 ? "0" + month : month) +
"-" +
(day < 10 ? "0" + day : day);
return 3;
}
function matchPerformers(parts, result) {
var query =
"\
query findPerformers($performer_filter: PerformerFilterType, $filter: FindFilterType!) {\
findPerformers(performer_filter: $performer_filter, filter: $filter) {\
performers {\
id\
name\
alias_list\
}\
}\
}";
var variables = {
filter: {
per_page: -1,
},
performer_filter: {
name: {
modifier: "INCLUDES",
},
OR: {
aliases: {
modifier: "INCLUDES",
},
},
},
};
var totalMatchingParts = 0;
result.performer_ids = [];
do {
variables.performer_filter.name.value =
variables.performer_filter.OR.aliases.value = parts[0].substring(0, 2);
var queryResult = gql.Do(query, variables);
var performers = queryResult.findPerformers.performers;
if (!performers.length) {
parts.shift();
continue;
}
var maxMatchLength = 0;
var matches = [];
for (var i = 0; i < performers.length; i++) {
var performer = performers[i];
      // the query selects alias_list, which is already an array of alias strings
      var aliases = performer.alias_list || [];
var matchingParts = matchNames(parts, performer.name, aliases);
if (matchingParts === 0) {
continue;
}
if (matchingParts > maxMatchLength) {
maxMatchLength = matchingParts;
matches = [performer.id];
} else if (matchingParts === maxMatchLength) {
matches.push(performer.id);
}
}
if (maxMatchLength === 0) {
break;
}
result.performer_ids = result.performer_ids.concat(matches);
totalMatchingParts += maxMatchLength;
parts = parts.slice(maxMatchLength);
while (
parts.length > 0 &&
(parts[0].toLowerCase() === "and" || parts[0] === "&")
) {
parts.shift();
totalMatchingParts += 1;
}
} while (parts.length > 0);
return totalMatchingParts;
}
function parseFilename(name) {
var parts = name.split(/[. \-_,]+/);
var matchers = [matchStudio, matchDate, matchPerformers];
var result = {};
var hasMatched = false;
for (
var matchTries = 0;
matchTries < 3 && !hasMatched && parts.length;
matchTries++
) {
for (var i = 0; i < matchers.length && parts.length > 0; i++) {
var matchedParts = matchers[i](parts, result);
if (matchedParts > 0) {
hasMatched = true;
parts = parts.slice(matchedParts);
}
}
    // If no matcher succeeded, drop the first part and retry; the format may be right but, for example, the studio may not exist in the library
if (!hasMatched) {
parts.shift();
}
}
if (hasMatched && parts.length > 0) {
var title = parts.join(" ");
    // Look-behind assertions are not supported, so we can't use `replace(/(?<=.)([A-Z])/g, ' $1')`; we loop instead, since otherwise e.g. 'FooABar' would become 'Foo ABar' instead of 'Foo A Bar'
while (/[^\s][A-Z]/.test(title)) {
title = title.replace(/([^\s])([A-Z])/g, "$1 $2");
}
result.title = title.trim();
}
return result;
}
main();

View File

@@ -1,13 +0,0 @@
name: Filename parser
description: Parses filename into studio, date, performers and title
url:
version: 0.1
exec:
- filenameParser.js
interface: js
hooks:
- name: Prepopulates data based on filename
description:
triggeredBy:
- Scene.Create.Post
- Gallery.Create.Post

View File

@@ -1,12 +0,0 @@
id: filenameParser
name: Filename parser
metadata:
description: Parses filename into studio, date, performers and title
version: 0.1-034a8a7
date: "2025-05-18 19:37:19"
requires: []
source_repository: https://stashapp.github.io/CommunityScripts/stable/index.yml
files:
- README.md
- filenameParser.yml
- filenameParser.js

View File

@@ -1,94 +0,0 @@
# Star Identifier
https://discourse.stashapp.cc/t/star-identifier/3761
https://github.com/axxeman23/star_identifier
## Intro
Star Identifier uses [facial recognition](https://github.com/ageitgey/face_recognition) to automatically identify who is in images or scene screenshots from the performers already in your [Stash](https://github.com/stashapp/stash) library.
## Requirements
### Python3
__version: 3.10.x +__
#### Installing Python
1. Download Python [here](https://www.python.org/downloads/)
2. Install & add to your PATH
3. Configure Stash to use Python (if necessary; this can be set in the `System` tab of your `Settings` page)
### Libs & Dependencies
#### CMake
For Windows:
- You'll also need Microsoft Visual Studio 2015 (or newer) with the C/C++ compiler installed. [Link here](https://visualstudio.microsoft.com/downloads/)
- Install and add CMake to your PATH. [Link](https://cmake.org/download/)
- For more details, see [this issue](https://github.com/ageitgey/face_recognition/issues/175)
For Mac & Linux:
`brew install cmake`
#### Python Libraries
1. numpy
2. dlib
3. face_recognition
`pip install numpy dlib face_recognition`
For more details, see the [Face Recognition installation instructions](https://github.com/ageitgey/face_recognition#installation).
### Plugin Files
You'll need the following files from this repo in your `plugins` folder. Copy `star_identifier.yml` to the `plugins` folder, and the rest of the files to a folder called `py_plugins` inside the `plugins` folder. If you already have `log.py` in `py_plugins`, skip copying that one (it should be the same)
```
star_identifier.yml
py_plugins:
| log.py
| star_identifier_config.py
| star_identifier_interface.py
| star_identifier.py
```
## Config
### Paths
Running the plugin will create a folder. By default, this will be created in your `plugins` folder, but you can change that in the config.
Face encodings will be saved to that new folder. The encodings file will be roughly 1MB per 1,000 performers.
### Stash Settings
Star Identifier uses a tag to find images or scenes you would like identified. By default, that tag is `star identifier`.
Since the recognition is based on a single performer image, that image needs to have a pretty clear front-facing view of the performer's face. If face_recognition fails to find a performer's face, Star Identifier will tag that performer with `star identifier performer error` by default.
### Star Identifier Settings
You can adjust the tolerance for identification here. `0.6` is default and typical, but I've found `0.5` to work well. Lower is more strict.
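All of these settings are plain module-level assignments in `star_identifier_config.py` (the full file appears later in this diff); for example, a stricter setup might look like:
```
root_path = ''            # defaults to the plugins folder
encodings_folder = 'star-identifier-encodings'
tag_name_identify = 'star identifier'
tag_name_encoding_error = 'star identifier performer error'
tolerance = 0.5           # 0.6 is the default; lower is stricter
```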
## Running
### Export Performers
This is the first step. Star Identifier loads each performer's image, encodes their facial features into a numpy array, and saves those arrays. The clearer the face of the performer, the better identification results will be. Performers whose faces are not recognized by face_recognition will be tagged for you to update as desired.
This only needs to be run once, or after new performers are added or have updated images.
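Under the hood this step boils down to `face_recognition`'s encoding call plus a numpy archive. A minimal sketch, not the plugin's exact code (`image_url` and `performer_id` are placeholders):
```
import urllib.request
import face_recognition
import numpy as np

# one 128-dimensional face encoding per performer profile image
image = face_recognition.load_image_file(urllib.request.urlopen(image_url))
encoding = face_recognition.face_encodings(image)[0]  # IndexError when no face is found

# all encodings end up keyed by performer id in a single .npz archive
np.savez('star-identifier-encodings.npz', **{performer_id: encoding})
```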
### Identify Images
This loads all images in the stash database tagged with `star identifier` (by default), compares the recognized faces to the exported face database, and then adds all potential matches to those images as performers.
### Identify Scene Screenshots
This loads the screenshot for every scene in the stash database tagged with `star identifier` (by default), compares the recognized faces to the exported face database, and then adds all potential matches to those scenes as performers.
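Both identify tasks then reload that archive and run `face_recognition.compare_faces` against every face found in the tagged image or scene screenshot. Again a sketch (`screenshot_path` is a placeholder):
```
import face_recognition
import numpy as np

npz = np.load('star-identifier-encodings.npz')
ids = list(npz)                    # performer ids
known = [npz[i] for i in ids]      # their saved face encodings

image = face_recognition.load_image_file(screenshot_path)
for face in face_recognition.face_encodings(image):
    hits = face_recognition.compare_faces(known, face, tolerance=0.6)
    matches = [ids[i] for i, hit in enumerate(hits) if hit]
    # matches are added to the image or scene as performers
```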
## Upcoming roadmap
See [issues](https://github.com/axxeman23/star_identifier/issues)

View File

@@ -1,52 +0,0 @@
import sys
# Log messages sent from a plugin instance are transmitted via stderr and are
# encoded with a prefix consisting of special character SOH, then the log
# level (one of t, d, i, w, e, or p - corresponding to trace, debug, info,
# warning, error and progress levels respectively), then special character
# STX.
#
# The LogTrace, LogDebug, LogInfo, LogWarning, and LogError methods, and their equivalent
# formatted methods are intended for use by plugin instances to transmit log
# messages. The LogProgress method is also intended for sending progress data.
#
def __prefix(level_char):
start_level_char = b'\x01'
end_level_char = b'\x02'
ret = start_level_char + level_char + end_level_char
return ret.decode()
def __log(level_char, s):
if level_char == "":
return
print(__prefix(level_char) + s + "\n", file=sys.stderr, flush=True)
def LogTrace(s):
__log(b't', s)
def LogDebug(s):
__log(b'd', s)
def LogInfo(s):
__log(b'i', s)
def LogWarning(s):
__log(b'w', s)
def LogError(s):
__log(b'e', s)
def LogProgress(p):
progress = min(max(0, p), 1)
__log(b'p', str(progress))
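# Hypothetical usage from a plugin task:
#   LogInfo("export started")   ->  stderr line "\x01i\x02export started"
#   LogProgress(0.5)            ->  stderr line "\x01p\x020.5"
# Stash strips the SOH/level/STX prefix and files each message at that level.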

View File

@@ -1,17 +0,0 @@
id: star_identifier
name: Star Identifier
metadata:
description: Use facial recognition to automatically identify who is in images or
scene screenshots from the performers already in your Stash library.
version: 1.0-7435efb
date: "2025-09-29 18:43:27"
requires: []
source_repository: https://stashapp.github.io/CommunityScripts/stable/index.yml
files:
- star_identifier.yml
- star_identifier_config.py
- README.md
- star_identifier.py
- requirements.txt
- log.py
- star_identifier_interface.py

View File

@@ -1,3 +0,0 @@
face_recognition
numpy
requests

View File

@@ -1,314 +0,0 @@
# https://github.com/axxeman23/star_identifier
# built-in
import json
import sys
import os
import pathlib
from concurrent.futures import ProcessPoolExecutor
import urllib.request
# external
import face_recognition
import face_recognition
import numpy as np
# local
import log
import star_identifier_config as config
from star_identifier_interface import IdentifierStashInterface
#
# constants
#
current_path = str(config.root_path or pathlib.Path(__file__).parent.absolute())
encoding_export_folder = str(pathlib.Path(current_path + f'/../{config.encodings_folder}/').absolute())
encodings_path = os.path.join(encoding_export_folder, config.encodings_filename)
errors_path = os.path.join(encoding_export_folder, config.encodings_error_filename)
#
# main
#
def main():
json_input = read_json_input()
output = {}
try:
run(json_input)
except Exception as error:
log.LogError(str(error))
return
out = json.dumps(output)
print(out + "\n")
def run(json_input):
log.LogInfo('==> running')
mode_arg = json_input['args']['mode']
client = IdentifierStashInterface(json_input["server_connection"])
match mode_arg:
case "export_known":
export_known(client)
case "identify_imgs":
identify_imgs(client, *load_encodings())
case "identify_scene_screenshots":
identify_scene_screenshots(client, *load_encodings())
case "debug":
debug_func(client)
case _:
export_known(client)
#
# utils
#
def read_json_input():
json_input = sys.stdin.read()
return json.loads(json_input)
def json_print(input, path):
os.makedirs(encoding_export_folder, exist_ok=True)
f = open(path, 'w')
json.dump(input, f)
f.close()
def get_scrape_tag(client, tag_name):
tag_id = client.findTagIdWithName(tag_name)
if tag_id is not None:
return tag_id
else:
client.createTagWithName(tag_name)
tag_id = client.findTagIdWithName(tag_name)
return tag_id
def get_scrape_tag_filter(client):
return {
"tags": {
"value": [get_scrape_tag(client, config.tag_name_identify)],
"modifier": "INCLUDES_ALL"
}
}
def load_encodings():
log.LogInfo("Loading exported face encodings...")
e = Exception(f"Encoding database not found at {encodings_path}. Run Export Performers and try again.")
try:
ids = []
known_face_encodings = []
npz = np.load(encodings_path)
if not len(npz):
raise e
for id in npz:
ids.append(id)
known_face_encodings.append(npz[id])
return [ids, known_face_encodings]
except FileNotFoundError:
raise e
#
# debug
#
def debug_print(input):
f = open(os.path.join(current_path, 'debug.txt'), 'a')
f.write(str(input))
f.close()
def debug_func(client):
f = open(os.path.join(current_path, 'debug.txt'), 'w')
f.close()
#
# export function
#
def export_known(client):
log.LogInfo('Getting all performer images...')
performers = client.getPerformerImages()
total = len(performers)
log.LogInfo(f"Found {total} performers")
if total == 0:
log.LogError('No performers found.')
return
os.makedirs(encoding_export_folder, exist_ok=True)
count = 0
outputDict = {}
errorList = []
log.LogInfo('Starting performer image export (this might take a while)')
futures_list = []
with ProcessPoolExecutor(max_workers=10) as executor:
for performer in performers:
futures_list.append(executor.submit(encode_performer_from_url, performer))
        for performer, future in zip(performers, futures_list):
            log.LogProgress(count / total)
            try:
                result = future.result()
                outputDict[result['id']] = result['encodings']
            except IndexError:
                # face_encodings()[0] found no face and the error re-raises here,
                # where `result` would be unbound; report the submitted performer instead
                log.LogInfo(f"No face found for {performer['name']}")
                errorList.append({ 'id': performer['id'], 'name': performer['name'] })
            count += 1
np.savez(encodings_path, **outputDict)
json_print(errorList, errors_path)
log.LogInfo(f'Finished exporting all {total} performer images. Failed recognitions saved to {str(errors_path)}.')
error_tag = get_scrape_tag(client, config.tag_name_encoding_error)
error_ids = list(map(lambda entry: entry['id'], errorList))
log.LogInfo(f"Tagging failed performer exports with {config.tag_name_encoding_error}...")
client.bulkPerformerAddTags(error_ids, [error_tag])
#
# Facial recognition functions
#
# Encoding
def encode_performer_from_url(performer):
image = face_recognition.load_image_file(urllib.request.urlopen(performer['image_path']))
performer['encodings'] = face_recognition.face_encodings(image)[0]
return performer
# Matching
def get_recognized_ids_from_image(image, known_face_encodings, ids):
image['matched_ids'] = get_recognized_ids(face_recognition.load_image_file(image['path']), known_face_encodings, ids)
return image
def get_recognized_ids_from_scene_screenshot(scene, known_face_encodings, ids):
image = urllib.request.urlopen(scene['paths']['screenshot'])
scene['matched_ids'] = get_recognized_ids(face_recognition.load_image_file(image), known_face_encodings, ids)
return scene
def get_recognized_ids(image_file, known_face_encodings, ids):
unknown_face_encodings = face_recognition.face_encodings(image_file)
recognized_ids = np.empty((0,0), int)
for unknown_face in unknown_face_encodings:
results = face_recognition.compare_faces(known_face_encodings, unknown_face, tolerance=config.tolerance)
        recognized_ids = np.append(recognized_ids, [ids[i] for i in range(len(results)) if results[i]])
return np.unique(recognized_ids).tolist()
# Execution
def execute_identification_list(known_face_encodings, ids, args):
count = 0
futures_list = []
with ProcessPoolExecutor(max_workers=10) as executor:
for item in args['items']:
futures_list.append(executor.submit(args['executor_func'], *[item, known_face_encodings, ids]))
        for item, future in zip(args['items'], futures_list):
            log.LogProgress(count / args['total'])
            debug_print(future)
            try:
                result = future.result()
                if not len(result['matched_ids']):
                    log.LogInfo(f"No matching performer found for {args['name']} id {result['id']}. Moving on to next {args['name']}...")
                else:
                    log.LogDebug(f"updating {args['name']} {result['id']} with {result['matched_ids']}")
                    args['submit_func'](result['id'], result['matched_ids'])
            except IndexError:
                # raised by the worker when no face is found; `result` is unbound here,
                # so identify the failure by the submitted item instead
                log.LogError(f"No face found in tagged {args['name']} id {item['id']}. Moving on to next {args['name']}...")
            except Exception:
                log.LogError(f"Unknown error comparing tagged {args['name']} id {item['id']}. Moving on to next {args['name']}...")
            count += 1
# Imgs
def identify_imgs(client, ids, known_face_encodings):
log.LogInfo(f"Getting images tagged with '{config.tag_name_identify}'...")
images = client.findImages(get_scrape_tag_filter(client))
total = len(images)
if not total:
log.LogError(f"No tagged images found. Tag images with '{config.tag_name_identify}', then try again.")
return
log.LogInfo(f"Found {total} tagged images. Starting identification...")
execution_args = {
'name': 'image',
'items': images,
'total': total,
'executor_func': get_recognized_ids_from_image,
'submit_func': client.addPerformersToImage
}
execute_identification_list(
known_face_encodings,
ids,
execution_args
)
log.LogInfo('Image identification complete!')
# Scenes
def identify_scene_screenshots(client, ids, known_face_encodings):
log.LogInfo(f"Getting scenes tagged with '{config.tag_name_identify}'...")
scenes = client.getScenePaths(get_scrape_tag_filter(client))
total = len(scenes)
if not total:
log.LogError(f"No tagged scenes found. Tag scenes with '{config.tag_name_identify}', then try again.")
return
log.LogInfo(f"Found {total} tagged scenes. Starting identification...")
execution_args = {
'name': 'scene',
'items': scenes,
'total': total,
'executor_func': get_recognized_ids_from_scene_screenshot,
'submit_func': client.addPerformersToScene
}
execute_identification_list(
known_face_encodings,
ids,
execution_args
)
log.LogInfo("Scene screenshot identification complete!")
if __name__ == "__main__":
main()
# https://github.com/ageitgey/face_recognition
# https://github.com/ageitgey/face_recognition/issues/175

View File

@@ -1,21 +0,0 @@
name: Star Identifier
description: Use facial recognition to automatically identify who is in images or scene screenshots from the performers already in your Stash library.
version: 1.0
url: https://github.com/axxeman23/star_identifier
exec:
- python
- "{pluginDir}/py_plugins/star_identifier.py"
interface: raw
tasks:
- name: Export Performers
description: Run this first! Exports current performer images and adds them to an encoding file for recognition.
defaultArgs:
mode: export_known
- name: Identify Images
description: Compares images tagged with 'star identifier' (by default) to exported performers, and adds all possible matches to the images.
defaultArgs:
mode: identify_imgs
- name: Identify Scene Screenshots
description: Compares scene screenshots tagged with 'star identifier' (by default) to exported performers, and adds all possible matches to the scenes.
defaultArgs:
mode: identify_scene_screenshots

View File

@@ -1,27 +0,0 @@
#
# Paths
#
root_path = '' # defaults to plugins folder
encodings_folder = 'star-identifier-encodings'
encodings_filename = 'star-identifier-encodings.npz'
encodings_error_filename = 'errors.json'
#
# Stash Settings
#
# The identifier will run on images / scenes tagged with this
tag_name_identify = 'star identifier'
# If the identifier can't find a face for a performer,
# it will add this tag to that performer
tag_name_encoding_error = 'star identifier performer error'
#
# Star Identifier Settings
#
# Tolerance: How much distance between faces to consider it a match.
# Lower is more strict. 0.6 is typical best performance.
tolerance = 0.6

View File

@@ -1,302 +0,0 @@
# most of this copied from https://github.com/niemands/StashPlugins
import requests
import sys
import log
class IdentifierStashInterface:
port = ""
url = ""
headers = {
"Accept-Encoding": "gzip, deflate, br",
"Content-Type": "application/json",
"Accept": "application/json",
"Connection": "keep-alive",
"DNT": "1"
}
cookies = {}
#
# Init
#
def __init__(self, conn):
self.port = conn['Port']
scheme = conn['Scheme']
# Session cookie for authentication
self.cookies = {
'session': conn.get('SessionCookie').get('Value')
}
try:
            # If stash is bound to all interfaces (0.0.0.0) connect via localhost,
            # otherwise use the host specified in the config
            host = conn.get('Host') if '0.0.0.0' not in conn.get('Host') else 'localhost'
except TypeError:
# Pre stable 0.8
host = 'localhost'
# Stash GraphQL endpoint
self.url = scheme + "://" + host + ":" + str(self.port) + "/graphql"
log.LogDebug(f"Using stash GraphQl endpoint at {self.url}")
def __callGraphQL(self, query, variables=None):
json = {'query': query}
if variables is not None:
json['variables'] = variables
response = requests.post(self.url, json=json, headers=self.headers, cookies=self.cookies)
if response.status_code == 200:
result = response.json()
if result.get("error", None):
for error in result["error"]["errors"]:
raise Exception("GraphQL error: {}".format(error))
if result.get("data", None):
return result.get("data")
elif response.status_code == 401:
sys.exit("HTTP Error 401, Unauthorised. Cookie authentication most likely failed")
else:
raise ConnectionError(
"GraphQL query failed:{} - {}. Query: {}. Variables: {}".format(
response.status_code, response.content, query, variables)
)
#
# Queries
#
# Performers
def getPerformerImages(self, performer_filter=None):
return self.__getPerformerImages(performer_filter)
def __getPerformerImages(self, performer_filter=None, page=1):
per_page = 1000
query = """
query($per_page: Int, $page: Int, $performer_filter: PerformerFilterType) {
findPerformers(
performer_filter: $performer_filter
filter: { per_page: $per_page, page: $page }
) {
count
performers {
id
name
image_path
}
}
}
"""
variables = {
'per_page': per_page,
'page': page
}
if performer_filter:
variables['performer_filter'] = performer_filter
result = self.__callGraphQL(query, variables)
performers = result.get('findPerformers').get('performers')
if len(performers) == per_page:
next_page = self.__getPerformerImages(performer_filter, page + 1)
for performer in next_page:
performers.append(performer)
return performers
# Tags
def findTagIdWithName(self, name):
query = """
query($name: String!) {
findTags(
tag_filter: {
name: {value: $name, modifier: EQUALS}
}
){
tags{
id
name
}
}
}
"""
variables = {
'name': name,
}
result = self.__callGraphQL(query, variables)
if result.get('findTags') is not None and result.get('findTags').get('tags') != []:
return result.get('findTags').get('tags')[0].get('id')
return None
# Images
def findImages(self, image_filter=None):
return self.__findImages(image_filter)
def __findImages(self, image_filter=None, page=1):
per_page = 1000
query = """
query($per_page: Int, $page: Int, $image_filter: ImageFilterType) {
findImages(
image_filter: $image_filter,
filter: { per_page: $per_page, page: $page }
) {
count
images {
id
path
performers {
id
}
}
}
}
"""
variables = {
'per_page': per_page,
'page': page
}
if image_filter:
variables['image_filter'] = image_filter
result = self.__callGraphQL(query, variables)
images = result.get('findImages').get('images')
if len(images) == per_page:
next_page = self.__findImages(image_filter, page + 1)
for image in next_page:
images.append(image)
return images
# Scenes
def getScenePaths(self, scene_filter=None):
return self.__getScenePaths(scene_filter)
def __getScenePaths(self, scene_filter=None, page=1):
per_page = 1000
query = """
query($per_page: Int, $page: Int, $scene_filter: SceneFilterType) {
findScenes(
scene_filter: $scene_filter,
filter: { per_page: $per_page, page: $page }
) {
count
scenes {
id
paths {
screenshot
stream
}
}
}
}
"""
variables = {
'per_page': per_page,
'page': page
}
if scene_filter:
variables['scene_filter'] = scene_filter
result = self.__callGraphQL(query, variables)
scenes = result.get('findScenes').get('scenes')
        if len(scenes) == per_page:
next_page = self.__getScenePaths(scene_filter, page + 1)
for scene in next_page:
scenes.append(scene)
return scenes
#
# Mutations
#
def createTagWithName(self, name):
query = """
mutation tagCreate($input:TagCreateInput!) {
tagCreate(input: $input){
id
}
}
"""
variables = {'input': {
'name': name
}}
result = self.__callGraphQL(query, variables)
if result.get('tagCreate'):
log.LogDebug(f"Created tag: {name}")
return result.get('tagCreate').get("id")
else:
log.LogError(f"Could not create tag: {name}")
return None
def updateImage(self, image_data):
query = """
mutation($input: ImageUpdateInput!) {
imageUpdate(input: $input) {
id
}
}
"""
variables = {'input': image_data}
self.__callGraphQL(query, variables)
def addPerformersToImage(self, image_id, performer_ids):
self.updateImage({
'id': image_id,
'performer_ids': performer_ids
})
def bulkPerformerAddTags(self, performer_ids, tag_ids):
query = """
mutation($ids: [ID!], $tag_ids: BulkUpdateIds) {
bulkPerformerUpdate(input: { ids: $ids, tag_ids: $tag_ids }) {
id
}
}
"""
variables = {
"ids": performer_ids,
"tag_ids": {
"ids": tag_ids,
"mode": 'ADD'
}
}
self.__callGraphQL(query, variables)
def addPerformersToScene(self, scene_id, performer_ids):
query = """
mutation BulkSceneUpdate($ids: [ID!], $performer_ids: BulkUpdateIds) {
bulkSceneUpdate(input: { ids: $ids, performer_ids: $performer_ids}) {
id
}
}
"""
variables = {
"ids": [scene_id],
"performer_ids": {
"ids": performer_ids,
"mode": "ADD"
}
}
self.__callGraphQL(query, variables)

View File

@@ -1,3 +0,0 @@
# Stash AI
https://discourse.stashapp.cc/t/stash-ai/1392

View File

@@ -1,13 +0,0 @@
id: stashai
name: Stash AI
metadata:
description: Add Tags or Markers to a video using AI
version: 1.0.2-034a8a7
date: "2025-05-18 19:37:19"
requires: []
source_repository: https://stashapp.github.io/CommunityScripts/stable/index.yml
files:
- stashai.yml
- README.md
- stashai.css
- stashai.js

View File

@@ -1,155 +0,0 @@
button.svelte-1m5gxnd {
background-color: var(--nav-color);
border: 0px;
}
.scanner.svelte-1m5gxnd {
animation: svelte-1m5gxnd-pulse 2s infinite;
}
@keyframes svelte-1m5gxnd-pulse {
0% {
transform: scale(0.95);
box-shadow: 0 0 0 0 var(--light);
}
70% {
transform: scale(1.1);
box-shadow: 0 0 0 10px var(--info);
}
100% {
transform: scale(0.95);
box-shadow: 0 0 0 0 var(--primary);
}
}
svg.svelte-1m5gxnd {
fill: #ffffff;
}
button.svelte-xcs6vi {
background-color: var(--nav-color);
border: 0px;
}
.scanner.svelte-xcs6vi {
animation: svelte-xcs6vi-pulse 2s infinite;
}
@keyframes svelte-xcs6vi-pulse {
0% {
transform: scale(0.95);
box-shadow: 0 0 0 0 var(--light);
}
70% {
transform: scale(1.1);
box-shadow: 0 0 0 10px var(--info);
}
100% {
transform: scale(0.95);
box-shadow: 0 0 0 0 var(--primary);
}
}
.top-accent.svelte-9viihb {
border-top: 10px solid var(--primary);
border-radius: 3px;
}
.modal-header.svelte-9viihb {
font-size: 2.4rem;
border-bottom: 0px;
padding: 20px;
}
.modal-footer.svelte-9viihb {
border-top: 0px;
}
.lds-dual-ring.svelte-9viihb {
display: inline-block;
width: 16px;
height: 16px;
}
.lds-dual-ring.svelte-9viihb:after {
content: " ";
display: block;
width: 12px;
height: 12px;
margin: 3px;
border-radius: 50%;
border: 6px solid #fff;
border-color: #fff transparent #fff transparent;
animation: svelte-9viihb-lds-dual-ring 1.2s linear infinite;
}
@keyframes svelte-9viihb-lds-dual-ring {
0% {
transform: rotate(0deg);
}
100% {
transform: rotate(360deg);
}
}
.modal-header.svelte-qsvzsw {
font-size: 1rem;
border-bottom: 0px;
padding: 20px;
}
.modal-footer.svelte-qsvzsw {
border-top: 0px;
}
.selected.svelte-qsvzsw {
border: 2px solid #007bff;
}
.tagger-tabs.svelte-qsvzsw {
position: absolute;
flex: 0 0 450px;
max-width: 450px;
min-width: 450px;
height: 100%;
overflow: auto;
order: -1;
background-color: var(--body-color);
}
.tag-item.svelte-zeu5yg {
background-color: var(--card-color);
width: 100%;
padding: 5px;
margin: 0px;
}
.tag-item-accept.svelte-zeu5yg:hover {
fill: #45a82c;
transition: fill 0.2s ease-out;
}
.tag-item-reject.svelte-zeu5yg:hover {
fill: #a82c2c;
transition: fill 0.2s ease-out;
}
.tag-item-select.svelte-zeu5yg {
border: none;
outline: none;
scroll-behavior: smooth;
}
.scrubber-item.svelte-zeu5yg {
width: 160px;
height: 90px;
border-radius: 5px 5px 0px 0px;
position: relative;
cursor: pointer;
}
svg.svelte-zeu5yg {
fill: #ffffff;
}
.tag-item.svelte-1d03wug {
background-color: var(--card-color);
width: 100%;
padding: 5px;
margin: 0px;
}
.tag-item-select.svelte-1d03wug {
border: none;
outline: none;
scroll-behavior: smooth;
}
.tag-item-reject.svelte-1d03wug:hover {
fill: #a82c2c;
transition: fill 0.2s ease-out;
}
.scrubber-item.svelte-1d03wug {
width: 160px;
height: 90px;
border-radius: 5px 5px 0px 0px;
position: relative;
}
svg.svelte-1d03wug {
fill: #ffffff;
}

File diff suppressed because it is too large


@@ -1,14 +0,0 @@
name: Stash AI
# requires: CommunityScriptsUILibrary
description: Add Tags or Markers to a video using AI
version: 1.0.2
ui:
requires:
- CommunityScriptsUILibrary
javascript:
- stashai.js
css:
- stashai.css
csp:
connect-src:
- "https://cc1234-stashtag.hf.space"


@@ -1,3 +0,0 @@
# stashdb performer gallery
https://discourse.stashapp.cc/t/stashdb-performer-gallery/1411


@@ -1,17 +0,0 @@
id: stashdb-performer-gallery
name: stashdb performer gallery
metadata:
  description: Automatically download performer images from stashdb or other stash-boxes.
    Add the [Stashbox Performer Gallery] tag to a performer and a gallery of images
    from that stash-box database will be created for it. Apply the [Set Profile Image]
    tag to an image to set it as that performer's profile image. Note that you will
    need to configure the download path and add it as a path under Settings > Library.
version: 0.2-034a8a7
date: "2025-05-18 19:37:19"
requires: []
source_repository: https://stashapp.github.io/CommunityScripts/stable/index.yml
files:
- stashdb-performer-gallery.py
- README.md
- requirements.txt
- stashdb-performer-gallery.yml
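The workflow in the description is tag-driven. A rough sketch of kicking it off for a single performer with stashapp-tools, assuming a configured StashInterface named `stash` (the performer name here is made up):

tag = stash.find_tag("[Stashbox Performer Gallery]", create=True)
performer = stash.find_performer("Example Performer")
# Append the trigger tag; the plugin's Performer.Update.Post hook takes it from there.
stash.update_performer({
    "id": performer["id"],
    "tag_ids": [t["id"] for t in performer["tags"]] + [tag["id"]],
})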


@@ -1,2 +0,0 @@
requests
stashapp-tools


@@ -1,470 +0,0 @@
import stashapi.log as log
from stashapi.stashapp import StashInterface
from stashapi.stashbox import StashBoxInterface
import os
import sys
import requests
import json
from pathlib import Path
import base64
per_page = 100
request_s = requests.Session()
stash_boxes = {}
scrapers = {}
def processImages(img):
log.debug("image: %s" % (img,))
image_data = None
for file in [x["path"] for x in img["visual_files"]]:
if settings["path"] in file:
index_file = Path(Path(file).parent) / (Path(file).stem + ".json")
log.debug(index_file)
if index_file.exists():
log.debug("loading index file %s" % (index_file,))
with open(index_file) as f:
index = json.load(f)
index["id"] = img["id"]
if image_data:
image_data["gallery_ids"].extend(index["gallery_ids"])
else:
image_data = index
if image_data:
# log.debug(image_data)
stash.update_image(image_data)
def processPerformers():
query = {
"tags": {
"depth": 0,
"excludes": [],
"modifier": "INCLUDES_ALL",
"value": [tag_stashbox_performer_gallery],
}
}
performers = stash.find_performers(f=query)
for performer in performers:
processPerformer(performer)
def processPerformer(performer):
dir = Path(settings["path"]) / performer["id"]
dir.mkdir(parents=True, exist_ok=True)
nogallery = dir / ".nogallery"
nogallery.touch()
for sid in performer["stash_ids"]:
log.debug(sid)
processPerformerStashid(sid["endpoint"], sid["stash_id"], performer)
def get_stashbox(endpoint):
for sbx_config in stash.get_configuration()["general"]["stashBoxes"]:
if sbx_config["endpoint"] == endpoint:
stashbox = StashBoxInterface(
{"endpoint": sbx_config["endpoint"], "api_key": sbx_config["api_key"]}
)
stash_boxes[endpoint] = stashbox
return stashbox
def processPerformerStashid(endpoint, stashid, p):
log.info(
"processing performer %s, %s endpoint: %s, stash id: %s"
% (
p["name"],
p["id"],
endpoint,
stashid,
)
)
index_file = os.path.join(settings["path"], p["id"], "index.json")
if os.path.exists(index_file):
with open(os.path.join(settings["path"], p["id"], "index.json")) as f:
index = json.load(f)
else:
index = {"files": {}, "galleries": {}, "performer_id": p["id"]}
modified = False
stashbox = get_stashbox(endpoint)
if stashbox:
query = """id
name
images {
id
url
}
urls{
url
type
}
"""
perf = stashbox.find_performer(stashid, fragment=query)
log.debug(perf)
if endpoint not in index["galleries"]:
gallery_input = {
"title": "%s - %s "
% (
p["name"],
endpoint[8:-8],
),
"urls": [
"%s/performers/%s"
% (
endpoint[:-8],
stashid,
)
],
"tag_ids": [tag_stashbox_performer_gallery],
"performer_ids": [p["id"]],
}
gal = stash.create_gallery(gallery_input)
log.info("Created gallery %s" % (gal,))
index["galleries"][endpoint] = gal
modified = True
# check if the gallery still exists and has not been deleted
current_gal = stash.find_gallery(index["galleries"][endpoint])
log.debug("current: %s" % (current_gal,))
if current_gal is None:
log.debug("deleted?")
gallery_input = {
"title": "%s - %s "
% (
p["name"],
                endpoint[8:-8],
),
"urls": [
"%s/performers/%s"
% (
endpoint[:-8],
stashid,
)
],
"tag_ids": [tag_stashbox_performer_gallery],
"performer_ids": [p["id"]],
}
gal = stash.create_gallery(gallery_input)
log.info("Created gallery %s" % (gal,))
index["galleries"][endpoint] = gal
modified = True
if modified:
with open(index_file, "w") as f:
json.dump(index, f)
for img in perf["images"]:
image_index = Path(settings["path"]) / p["id"] / (img["id"] + ".json")
if not image_index.exists():
with open(image_index, "w") as f:
image_data = {
"title": img["id"],
"urls": [img["url"]],
"performer_ids": [p["id"]],
"tag_ids": [tag_stashbox_performer_gallery],
"gallery_ids": [index["galleries"][endpoint]],
}
json.dump(image_data, f)
filename = Path(settings["path"]) / p["id"] / (img["id"] + ".jpg")
if not os.path.exists(filename):
log.info(
"Downloading image %s to %s"
% (
img["url"],
filename,
)
)
r = requests.get(img["url"])
with open(filename, "wb") as f:
f.write(r.content)
# modified=True
else:
log.debug("image already downloaded")
# scrape urls on the performer using the url scrapers in stash
if settings["runPerformerScraper"] and len(perf["urls"]) > 0:
        # Determine which performer scrapers exist and which URL patterns they accept; this only needs to be queried once.
if len(scrapers) == 0:
scrapers_graphql = """query ListPerformerScrapers {
listScrapers(types: [PERFORMER]) {
id
name
performer {
urls
supported_scrapes
}
}
}"""
res = stash.callGQL(scrapers_graphql)
for r in res["listScrapers"]:
if r["performer"]["urls"]:
for url in r["performer"]["urls"]:
scrapers[url] = r
for u in perf["urls"]:
for url in scrapers.keys():
if url in u["url"]:
log.info(
"Running stash scraper on performer url: %s" % (u["url"],)
)
res = stash.scrape_performer_url(u["url"])
# Check if the scraper returned a result
if res is not None:
log.debug(res)
                        # a scraper can return multiple images, so increment a counter per image
image_id = 1
if res["images"]:
for image in res["images"]:
image_index = (
Path(settings["path"])
/ p["id"]
/ (
"%s-%s.json"
% (
scrapers[url]["id"],
image_id,
)
)
)
if not image_index.exists():
with open(image_index, "w") as f:
image_data = {
"title": "%s - %s "
% (
scrapers[url]["id"],
image_id,
),
"details": "name: %s\ngender: %s\nurl: %s\ntwitter: %s\ninstagram: %s\nbirthdate: %s\nethnicity: %s\ncountry: %s\neye_color: %s\nheight: %s\nmeasurements: %s\nfake tits: %s\npenis_length: %s\n career length: %s\ntattoos: %s\npiercings: %s\nhair_color: %s\nweight: %s\n description: %s\n"
% (
res["name"],
res["gender"],
res["url"],
res["twitter"],
res["instagram"],
res["birthdate"],
res["ethnicity"],
res["country"],
res["eye_color"],
res["height"],
res["measurements"],
res["fake_tits"],
res["penis_length"],
res["career_length"],
res["tattoos"],
res["piercings"],
res["hair_color"],
res["weight"],
res["details"],
),
"urls": [
u["url"],
],
"performer_ids": [p["id"]],
"tag_ids": [
tag_stashbox_performer_gallery
],
"gallery_ids": [
index["galleries"][endpoint]
],
}
json.dump(image_data, f)
filename = (
Path(settings["path"])
/ p["id"]
/ (
"%s-%s.jpg"
% (
scrapers[url]["id"],
image_id,
)
)
)
if not filename.exists():
if image.startswith("data:"):
with open(filename, "wb") as f:
f.write(
base64.b64decode(
image.split("base64,")[1]
)
)
else:
with open(image_index, "w") as f:
image_data = {
"title": "%s - %s "
% (
scrapers[url]["id"],
image_id,
),
"details": "%s" % (res,),
"urls": [u["url"], image],
"performer_ids": [p["id"]],
"tag_ids": [
tag_stashbox_performer_gallery
],
"gallery_ids": [
index["galleries"][endpoint]
],
}
json.dump(image_data, f)
filename = (
Path(settings["path"])
/ p["id"]
/ ("%s.jpg" % (image_id,))
)
                                            r = requests.get(image)  # download the scraped image URL
if r.status_code == 200:
with open(filename, "wb") as f:
f.write(r.content)
image_id = image_id + 1
# log.debug('%s %s' % (url['url'],url['type'],))
# stash.scraper
# scrape=stash.scrape_performer_url(ur)
else:
log.error("endpoint %s not configured, skipping" % (endpoint,))
def setPerformerPicture(img):
if len(img["performers"]) == 1:
log.debug(img["paths"]["image"])
res = request_s.get(img["paths"]["image"])
log.debug(res.headers["Content-Type"])
if res.status_code == 200:
encoded = base64.b64encode(res.content).decode()
new_performer = {
"id": img["performers"][0]["id"],
"image": "data:{0};base64,{1}".format(
res.headers["Content-Type"], encoded
),
}
log.info("updating performer with tagged image %s" % (new_performer["id"],))
stash.update_performer(new_performer)
def processQueue():
for id in settings["queue"].split(","):
if len(id) > 0:
p = stash.find_performer(id)
processPerformer(p)
# queue has not changed since we started, clear setting
    if (
        stash.get_configuration()["plugins"]["stashdb-performer-gallery"].get("queue", "")
        == settings["queue"]
    ):
stash.configure_plugin("stashdb-performer-gallery", {"queue": ""})
stash.metadata_scan(paths=[settings["path"]])
stash.run_plugin_task("stashdb-performer-gallery", "relink missing images")
else:
# update remove the completed entries from the queue string leaving the unprocessed and schedule the task again
log.debug("updating queue")
stash.configure_plugin(
"stashdb-performer-gallery",
{
"queue": stash.get_configuration()["plugins"][
"stashdb-performer-gallery"
]["queue"].removeprefix(settings["queue"])
},
)
stash.run_plugin_task(
"stashdb-performer-gallery", "Process Performers", args={"full": False}
)
def relink_images(performer_id=None):
query = {
"path": {"modifier": "INCLUDES", "value": settings["path"]},
}
    if performer_id is None:
        query["is_missing"] = "galleries"
    else:
        query["path"] = {
            "modifier": "INCLUDES",
            "value": str(Path(settings["path"]) / performer_id / ""),
        }
# else:
# query["file_count"] = {"modifier": "NOT_EQUALS", "value": 1}
    total = stash.find_images(f=query, get_count=True)[0]
    processed = 0
    page = 1
    # Page through the results, per_page at a time; pages are assumed 1-based.
    while processed < total:
        images = stash.find_images(f=query, filter={"page": page, "per_page": per_page})
        if not images:
            break
        for img in images:
            log.debug("image: %s" % (img,))
            processImages(img)
            processed += 1
            log.progress(processed / total)
        page += 1
json_input = json.loads(sys.stdin.read())
FRAGMENT_SERVER = json_input["server_connection"]
stash = StashInterface(FRAGMENT_SERVER)
config = stash.get_configuration()["plugins"]
settings = {
"path": "/download_dir",
"runPerformerScraper": False,
}
if "stashdb-performer-gallery" in config:
settings.update(config["stashdb-performer-gallery"])
# log.info('config: %s ' % (settings,))
tag_stashbox_performer_gallery = stash.find_tag(
"[Stashbox Performer Gallery]", create=True
).get("id")
tag_performer_image = stash.find_tag("[Set Profile Image]", create=True).get("id")
if "stasdb-performer-gallery" in config:
settings.update(config["stasdb-performer-gallery"])
if "mode" in json_input["args"]:
PLUGIN_ARGS = json_input["args"]["mode"]
if "performer" in json_input["args"]:
p = stash.find_performer(json_input["args"]["performer"])
if tag_stashbox_performer_gallery in [x["id"] for x in p["tags"]]:
processPerformer(p)
stash.metadata_scan(paths=[settings["path"]])
stash.run_plugin_task(
"stashdb-performer-gallery",
"relink missing images",
args={"performer_id": p["id"]},
)
elif "processPerformers" in PLUGIN_ARGS:
processPerformers()
stash.metadata_scan([settings["path"]])
stash.run_plugin_task(
"stashdb-performer-gallery", "relink missing images", args={}
)
elif "processImages" in PLUGIN_ARGS:
if "performer_id" in json_input["args"]:
relink_images(performer_id=json_input["args"]["performer_id"])
else:
relink_images()
elif "hookContext" in json_input["args"]:
id = json_input["args"]["hookContext"]["id"]
if json_input["args"]["hookContext"]["type"] == "Image.Create.Post":
img = stash.find_image(image_in=id)
processImages(img)
if json_input["args"]["hookContext"]["type"] == "Image.Update.Post":
img = stash.find_image(image_in=id)
if tag_performer_image in [x["id"] for x in img["tags"]]:
setPerformerPicture(img)
if json_input["args"]["hookContext"]["type"] == "Performer.Update.Post":
stash.run_plugin_task(
"stashdb-performer-gallery", "Process Performers", args={"performer": id}
)
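Because this is a raw-interface plugin, the script expects a single JSON object on stdin carrying the server connection and task args, which the code above reads and dispatches on. A rough local-test sketch (the server_connection fields are assumptions for illustration, not a verified schema):

import json
import subprocess

payload = {
    "server_connection": {"Scheme": "http", "Host": "localhost", "Port": 9999},
    "args": {"mode": "processPerformers"},
}
# Feed the payload on stdin, the same way Stash does when running a task.
subprocess.run(
    ["python", "stashdb-performer-gallery.py"],
    input=json.dumps(payload),
    text=True,
)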


@@ -1,41 +0,0 @@
name: stashdb performer gallery
description: Automatically download performer images from stashdb or other stash-boxes. Add the [Stashbox Performer Gallery] tag to a performer and a gallery of images from that stash-box database will be created for it. Apply the [Set Profile Image] tag to an image to set it as that performer's profile image. Note that you will need to configure the download path and add it as a path under Settings > Library.
version: 0.2
url: https://github.com/stashapp/CommunityScripts/
exec:
- python
- "{pluginDir}/stashdb-performer-gallery.py"
interface: raw
settings:
path:
displayName: Download parent folder
    description: Download location for files. Note that this should be a folder separate from your existing stash content but still covered by a stash library path; you may need to create a new library path for this directory.
type: STRING
runPerformerScraper:
displayName: Run stash scrapers on profile urls
    description: Also run stash's URL scrapers on each performer's profile URLs and download any images they return
type: BOOLEAN
hooks:
- name: modify performer
    description: Download galleries on performer update if the [Stashbox Performer Gallery] tag is applied
triggeredBy:
- Performer.Update.Post
- name: image add
    description: Link newly created images to their performer galleries using the downloaded index files
triggeredBy:
- Image.Create.Post
- name: set profile images
    description: Set a profile image by adding the [Set Profile Image] tag to an image; exactly one performer must be tagged on the image.
triggeredBy:
- Image.Update.Post
tasks:
- name: "Process Performers"
    description: Fetch performer images for performers with the [Stashbox Performer Gallery] tag
defaultArgs:
mode: processPerformers
- name: "relink missing images"
    description: Reprocess images that are missing gallery links
defaultArgs:
mode: processImages
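The `mode` values under defaultArgs are what the script's dispatcher matches against, and the task names are the same ones the script re-triggers via run_plugin_task. A sketch of launching a task programmatically with stashapp-tools (connection details are assumptions):

from stashapi.stashapp import StashInterface

stash = StashInterface({"scheme": "http", "host": "localhost", "port": 9999})
# Equivalent to clicking "Process Performers" in the Tasks UI.
stash.run_plugin_task("stashdb-performer-gallery", "Process Performers")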


@@ -1,12 +0,0 @@
id: visage
name: Visage
metadata:
  description: Use facial recognition to look up performers.
version: 1.0.2-adade5e
date: "2024-06-01 22:24:15"
requires: []
source_repository: https://stashapp.github.io/CommunityScripts/stable/index.yml
files:
- visage.yml
- visage.js
- visage.css


@@ -1,132 +0,0 @@
button.svelte-1m5gxnd {
background-color: var(--nav-color);
border: 0px;
}
.scanner.svelte-1m5gxnd {
animation: svelte-1m5gxnd-pulse 2s infinite;
}
@keyframes svelte-1m5gxnd-pulse {
0% {
transform: scale(0.95);
box-shadow: 0 0 0 0 var(--light);
}
70% {
transform: scale(1.1);
box-shadow: 0 0 0 10px var(--info);
}
100% {
transform: scale(0.95);
box-shadow: 0 0 0 0 var(--primary);
}
}
svg.svelte-1m5gxnd {
fill: #ffffff;
}
.carousel.svelte-ssoxzi {
display: flex;
overflow-x: auto;
overflow-y: auto;
white-space: nowrap;
overscroll-behavior-x: contain;
overscroll-behavior-y: contain;
scroll-snap-type: x mandatory;
gap: 1rem;
}
.modal-header.svelte-ssoxzi {
font-size: 2.4rem;
border-bottom: 0px;
padding: 10px 10px 0px 10px;
}
.modal-footer.svelte-ssoxzi {
border-top: 0px;
}
.svelte-ssoxzi::-webkit-scrollbar {
width: 30px;
}
.svelte-ssoxzi::-webkit-scrollbar-thumb {
background: var(--orange);
border-radius: 20px;
}
.card.svelte-ssoxzi {
min-width: 250px;
}
.performer-card.svelte-ssoxzi {
cursor: pointer;
}
.assigned.svelte-ssoxzi {
border: 5px solid var(--green);
animation: border 1s ease-in-out;
}
.face-tab.svelte-ssoxzi {
width: 50px;
height: 50px;
object-fit: cover;
}
.selected.svelte-p95y28 {
border: 2px solid #007bff;
}
.face-tabs.svelte-p95y28 {
position: absolute;
flex: 0 0 450px;
max-width: 450px;
min-width: 450px;
height: 100%;
overflow: auto;
order: -1;
background-color: var(--body-color);
}
.face-item.svelte-p95y28 {
width: 160px;
height: 90px;
border-radius: 5px 5px 0px 0px;
position: relative;
cursor: pointer;
}
.svelte-tabs__tab.svelte-1fbofsd {
border: none;
border-bottom: 2px solid transparent;
color: #000000;
cursor: pointer;
list-style: none;
display: inline-block;
padding: 0.5em 0.75em;
}
.svelte-tabs__tab.svelte-1fbofsd:focus {
outline: thin dotted;
}
.svelte-tabs__selected.svelte-1fbofsd {
border-bottom: 2px solid #4f81e5;
color: #4f81e5;
}
.svelte-tabs__tab-panel.svelte-epfyet {
margin-top: 0.5em;
}
.svelte-tabs__tab-list.svelte-12yby2a {
border-bottom: 1px solid #cccccc;
margin: 0;
padding: 0;
}

File diff suppressed because one or more lines are too long


@@ -1,14 +0,0 @@
name: Visage
# requires: CommunityScriptsUILibrary
description: Use facial recognition to look up performers.
version: 1.0.2
ui:
requires:
- CommunityScriptsUILibrary
javascript:
- visage.js
css:
- visage.css
csp:
connect-src:
- "https://cc1234-stashface.hf.space"


@@ -1,55 +0,0 @@
name: 10Musume
sceneByURL:
- action: scrapeJson
url:
- en.10musume.com/movies/
scraper: sceneScraper
queryURL: "https://en.10musume.com/dyn/phpauto/movie_details/movie_id/{url}.json"
queryURLReplace:
url:
- regex: '.+/movies/(\d{6})_(\d{2}).+'
with: "${1}_${2}"
sceneByFragment:
action: scrapeJson
queryURL: "https://en.10musume.com/dyn/phpauto/movie_details/movie_id/{filename}.json"
scraper: sceneScraper
queryURLReplace:
filename:
- regex: '.*(\d{6})[_.-](\d{2}).+'
with: "${1}_${2}"
jsonScrapers:
sceneScraper:
scene:
Title:
selector: "[TitleEn,Title]"
concat: "|" # get rid of empty result
split: "|"
Details:
selector: "[DescEn,Desc]"
concat: "|" # get rid of empty result
split: "|"
postProcess:
- replace:
- regex: ^\s+(.+)\s+$
with: $1
Date:
selector: Release
postProcess:
- parseDate: 2006-01-02
Image: ThumbHigh
Performers:
Name: ActressesEn
Studio:
Name:
fixed: 10Musume
Tags:
Name: UCNAMEEn
URL:
selector: MovieID
postProcess:
- replace:
- regex: ^
with: "https://en.10musume.com/movies/"
- regex: $
with: "/"
# Last Updated November 09, 2021
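The queryURLReplace block is a regex substitution applied to the named placeholder before it is dropped into queryURL. In effect, it performs the following (Python sketch; the example URL is made up):

import re

url = "https://en.10musume.com/movies/010122_01/"
movie_id = re.sub(r".+/movies/(\d{6})_(\d{2}).+", r"\1_\2", url)  # -> "010122_01"
query_url = f"https://en.10musume.com/dyn/phpauto/movie_details/movie_id/{movie_id}.json"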


@@ -1,9 +0,0 @@
id: 10Musume
name: 10Musume
metadata: {}
version: d99c8c2
date: "2021-11-14 11:30:04"
requires: []
source_repository: https://stashapp.github.io/CommunityScrapers/stable/index.yml
files:
- 10Musume.yml


@@ -1,35 +0,0 @@
name: "1 Pass For All Sites"
sceneByURL:
- action: scrapeXPath
url:
- 1passforallsites.com/episode/
scraper: sceneScraper
xPathScrapers:
sceneScraper:
scene:
Studio:
Name:
selector: //a[contains(@href,'?site=')]
Title:
selector: //title
postProcess:
- replace:
- regex: (^.+) - 1 .+$
with: $1
Details: //div[@class="sp-info-txt"]/p/text()
Performers:
Name:
selector: //p[@class="sp-info-name"]/a/text()
Tags:
Name:
selector: //p[@class="niches-list"]/a/text()
Date:
selector: //li[contains(text(),"Added:")]
postProcess:
- replace:
- regex: "Added\\: (.+)"
with: $1
- parseDate: 2 Jan 2006
Image: //video/@poster
# Last Updated July 12, 2023


@@ -1,9 +0,0 @@
id: 1passforallsites
name: 1 Pass For All Sites
metadata: {}
version: 81ddf2c
date: "2023-07-13 02:50:30"
requires: []
source_repository: https://stashapp.github.io/CommunityScrapers/stable/index.yml
files:
- 1passforallsites.yml


@@ -1,53 +0,0 @@
name: 1Pondo
sceneByURL:
- action: scrapeJson
url:
- en.1pondo.tv/movies/
scraper: sceneScraper
queryURL: "https://en.1pondo.tv/dyn/phpauto/movie_details/movie_id/{url}.json"
queryURLReplace:
url:
- regex: '.+/movies/(\d{6})_(\d{3}).+'
with: "${1}_${2}"
sceneByFragment:
action: scrapeJson
queryURL: "https://en.1pondo.tv/dyn/phpauto/movie_details/movie_id/{filename}.json"
scraper: sceneScraper
queryURLReplace:
filename:
- regex: '.*(\d{6})[_.-](\d{3}).+'
with: "${1}_${2}"
jsonScrapers:
sceneScraper:
scene:
Title:
selector: "[TitleEn,Title]"
concat: "|" # get rid of empty result
split: "|"
Details:
selector: DescEn
postProcess:
- replace:
- regex: ^\s+(.+)\s+$
with: $1
Date:
selector: Release
postProcess:
- parseDate: 2006-01-02
Image: ThumbHigh
Performers:
Name: ActressesEn
Studio:
Name:
fixed: 1Pondo
Tags:
Name: UCNAMEEn
URL:
selector: MovieID
postProcess:
- replace:
- regex: ^
with: "https://en.1pondo.tv/movies/"
- regex: $
with: "/"
# Last Updated April 30, 2021


@@ -1,9 +0,0 @@
id: 1pondo
name: 1Pondo
metadata: {}
version: 66d4760
date: "2021-07-18 15:48:20"
requires: []
source_repository: https://stashapp.github.io/CommunityScrapers/stable/index.yml
files:
- 1pondo.yml


@@ -1,31 +0,0 @@
# requires: Algolia
name: "21Naturals"
sceneByURL:
- action: script
url:
- 21naturals.com/en/video
script:
- python
- ../Algolia/Algolia.py
- 21naturals
sceneByFragment:
action: script
script:
- python
- ../Algolia/Algolia.py
- 21naturals
sceneByName:
action: script
script:
- python
- ../Algolia/Algolia.py
- 21naturals
- searchName
sceneByQueryFragment:
action: script
script:
- python
- ../Algolia/Algolia.py
- 21naturals
- validName
# Last Updated March 23, 2022
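With action: script, Stash runs the listed command and exchanges JSON over stdin/stdout. The sceneByName entry above amounts to roughly the following (sketch; the stdin fragment shape is an assumption, and the search term is made up):

import json
import subprocess

result = subprocess.run(
    ["python", "../Algolia/Algolia.py", "21naturals", "searchName"],
    input=json.dumps({"name": "example search"}),
    capture_output=True,
    text=True,
)
print(result.stdout)  # candidate scenes as JSON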


@@ -1,9 +0,0 @@
id: 21Naturals
name: 21Naturals
metadata: {}
version: 97bec71
date: "2023-11-22 00:53:44"
requires: []
source_repository: https://stashapp.github.io/CommunityScrapers/stable/index.yml
files:
- 21Naturals.yml


@@ -1,61 +0,0 @@
name: "21Roles"
sceneByURL:
- action: scrapeXPath
url:
- 21roles.com/game/DisplayPlayer/gameId/
- lifeselector.com/game/DisplayPlayer/gameId/
scraper: sceneScraper
xPathScrapers:
sceneScraper:
scene:
Title:
selector: //title/text()
postProcess:
- replace:
- regex: .+(?:DisplayPlayer\s-\s)(.+)(?:\s-\sDisplayPlayer)+
with: $1
Details: //div[@class="info"]/p/text()
Tags:
Name:
selector: //title/text()
postProcess:
- replace:
- regex: .+(?:DisplayPlayer\s-\s)(.+)(?:\s-\sDisplayPlayer)+
with: https://21roles.com/site/search/keyword/$1
- regex: (.+)(?:\s-\sInteractive).+
with: https://lifeselector.com/site/search/keyword/$1
- subScraper:
selector: //div[@class="details"]/div[contains(.,'Labels')]//a/text()
concat: ","
split: ","
Performers:
Name:
selector: //div[@class="modelBlock"]/div[@class="description"]/h1/a/text()
postProcess:
- replace:
- regex: .+(?:\/)(\d+)+
with: https://21roles.com/game/DisplayPlayer/gameId/$1/view/cast
- subScraper:
selector: //div[@class="content"]//h1/a/text()
concat: ","
split: ","
Image:
selector: //div[@class="signup-right-col"]//input[@id="requestUri"]/@value
postProcess:
- replace:
- regex: .+(?:\/)(\d+)+
with: https://i.c7cdn.com/generator/games/$1/images/poster/1_size1600.jpg
Studio:
Name:
selector: //meta[@property='og:site_name']/@content
postProcess:
- map:
21roles.com: '21Roles'
LifeSelector: 'LifeSelector'
# The driver is used only to grab tags; if you are unable to use CDP and don't
# mind losing the tags, comment out or remove the driver lines below
driver:
useCDP: true
# Last Updated November 08, 2020


@@ -1,9 +0,0 @@
id: 21Roles
name: 21Roles
metadata: {}
version: 66d4760
date: "2021-07-18 15:48:20"
requires: []
source_repository: https://stashapp.github.io/CommunityScrapers/stable/index.yml
files:
- 21Roles.yml


@@ -1,31 +0,0 @@
# requires: Algolia
name: "21Sextreme"
sceneByURL:
- action: script
url:
- 21sextreme.com/en/video
script:
- python
- ../Algolia/Algolia.py
- 21sextreme
sceneByFragment:
action: script
script:
- python
- ../Algolia/Algolia.py
- 21sextreme
sceneByName:
action: script
script:
- python
- ../Algolia/Algolia.py
- 21sextreme
- searchName
sceneByQueryFragment:
action: script
script:
- python
- ../Algolia/Algolia.py
- 21sextreme
- validName
# Last Updated March 23, 2022


@@ -1,9 +0,0 @@
id: 21Sextreme
name: 21Sextreme
metadata: {}
version: 97bec71
date: "2023-11-22 00:53:44"
requires: []
source_repository: https://stashapp.github.io/CommunityScrapers/stable/index.yml
files:
- 21Sextreme.yml


@@ -1,40 +0,0 @@
# requires: Algolia
name: "21Sextury"
sceneByURL:
- action: script
url:
- 21sextury.com/en/video
script:
- python
- ../Algolia/Algolia.py
- 21sextury
sceneByFragment:
action: script
script:
- python
- ../Algolia/Algolia.py
- 21sextury
sceneByName:
action: script
script:
- python
- ../Algolia/Algolia.py
- 21sextury
- searchName
sceneByQueryFragment:
action: script
script:
- python
- ../Algolia/Algolia.py
- 21sextury
- validName
galleryByURL:
- action: script
url:
- 21sextury.com/en/photo/
script:
- python
- ../Algolia/Algolia.py
- 21sextury
- gallery
# Last Updated December 22, 2022


@@ -1,9 +0,0 @@
id: 21Sextury
name: 21Sextury
metadata: {}
version: 97bec71
date: "2023-11-22 00:53:44"
requires: []
source_repository: https://stashapp.github.io/CommunityScrapers/stable/index.yml
files:
- 21Sextury.yml


@@ -1,49 +0,0 @@
# requires: Algolia
name: "3rdDegreeFilms"
sceneByURL:
- action: script
url:
- www.3rddegreefilms.com/en/video
script:
- python
- ../Algolia/Algolia.py
- 3rddegreefilms
sceneByFragment:
action: script
script:
- python
- ../Algolia/Algolia.py
- 3rddegreefilms
sceneByName:
action: script
script:
- python
- ../Algolia/Algolia.py
- 3rddegreefilms
- searchName
sceneByQueryFragment:
action: script
script:
- python
- ../Algolia/Algolia.py
- 3rddegreefilms
- validName
movieByURL:
- action: script
url:
- 3rddegreefilms.com/en/movie
script:
- python
- ../Algolia/Algolia.py
- 3rddegreefilms
- movie
galleryByURL:
- action: script
url:
- 3rddegreefilms.com/en/photo/
script:
- python
- ../Algolia/Algolia.py
- 3rddegreefilms
- gallery
# Last Updated March 13, 2024


@@ -1,9 +0,0 @@
id: 3rdDegreeFilms
name: 3rdDegreeFilms
metadata: {}
version: c616a0d
date: "2024-03-14 00:33:44"
requires: []
source_repository: https://stashapp.github.io/CommunityScrapers/stable/index.yml
files:
- 3rdDegreeFilms.yml

Some files were not shown because too many files have changed in this diff