Merge branch 'main' into feature/typed-dict-and-typing
.gitignore (vendored): 1 change
@@ -249,5 +249,6 @@ compile_commands.json

# TagStudio
.TagStudio
TagStudio.ini

# End of https://www.toptal.com/developers/gitignore/api/visualstudiocode,python,qt

.vscode/launch.json (vendored): 2 changes
@@ -8,7 +8,7 @@
            "name": "TagStudio",
            "type": "python",
            "request": "launch",
            "program": "${workspaceRoot}\\TagStudio\\tagstudio.py",
            "program": "${workspaceRoot}/tagstudio/tag_studio.py",
            "console": "integratedTerminal",
            "justMyCode": true,
            "args": []

@@ -90,19 +90,23 @@ _Learn more about setting up a virtual environment [here](https://docs.python.or

To launch TagStudio, launch the `start_win.bat` file. You can modify this .bat file or create a shortcut and add one or more additional arguments if desired.

Alternatively, with the virtual environment loaded, run the python file at `tagstudio\tagstudio.py` from your terminal. If you're in the project's root directory, simply run `python3 tagstudio/tagstudio.py`.
Alternatively, with the virtual environment loaded, run the python file at `tagstudio\tag_studio.py` from your terminal. If you're in the project's root directory, simply run `python3 tagstudio/tag_studio.py`.

> [!CAUTION]
> TagStudio on Linux & macOS likely won't function correctly at this time. If you're trying to run this in order to help test, debug, and improve compatibility, then charge on ahead!

#### macOS

With the virtual environment loaded, run the python file at "tagstudio/tagstudio.py" from your terminal. If you're in the project's root directory, simply run `python3 tagstudio/tagstudio.py`. When launching the program in the future, remember to activate the virtual environment each time before launching *(an easier method is currently being worked on).*
With the virtual environment loaded, run the python file at "tagstudio/tag_studio.py" from your terminal. If you're in the project's root directory, simply run `python3 tagstudio/tag_studio.py`. When launching the program in the future, remember to activate the virtual environment each time before launching *(an easier method is currently being worked on).*

#### Linux

Run the "TagStudio.sh" script, and the program should launch! (Make sure that the script is marked as executable). Note that launching from the script from outside of a terminal will not launch a terminal window with any debug or crash information. If you wish to see this information, just launch the shell script directly from your terminal with `sh TagStudio.sh`.

##### NixOS

Use the provided `flake.nix` file to create and enter a working environment by running `nix develop`. Then, run the above `TagStudio.sh` script.

## Usage

### Creating/Opening a Library

@@ -1,5 +1,5 @@
#! /bin/bash
#! /usr/bin/env bash
python3 -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
python tagstudio/tagstudio.py
python tagstudio/tag_studio.py

@@ -33,7 +33,7 @@ The Library is how TagStudio represents your chosen directory. In this Library o

## Fields

Fields are the the building blocks of metadata stored in Entires. Fields have several base types for representing different types of information, including:
Fields are the building blocks of metadata stored in Entires. Fields have several base types for representing different types of information, including:

- `text_line`
  - A string of text, displayed as a single line.

flake.lock (generated, new file): 27 lines
@@ -0,0 +1,27 @@
{
  "nodes": {
    "nixpkgs": {
      "locked": {
        "lastModified": 1712473363,
        "narHash": "sha256-TIScFAVdI2yuybMxxNjC4YZ/j++c64wwuKbpnZnGiyU=",
        "owner": "nixos",
        "repo": "nixpkgs",
        "rev": "e89cf1c932006531f454de7d652163a9a5c86668",
        "type": "github"
      },
      "original": {
        "owner": "nixos",
        "ref": "nixos-unstable",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "nixpkgs": "nixpkgs"
      }
    }
  },
  "root": "root",
  "version": 7
}
flake.nix (new file): 70 lines
@@ -0,0 +1,70 @@
{
  inputs.nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";

  outputs = { self, nixpkgs, }:
    let
      pkgs = nixpkgs.legacyPackages.x86_64-linux;
    in {
      devShells.x86_64-linux.default = pkgs.mkShell {
        LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath [
          pkgs.gcc-unwrapped
          pkgs.zlib
          pkgs.libglvnd
          pkgs.glib
          pkgs.stdenv.cc.cc
          pkgs.fontconfig
          pkgs.libxkbcommon
          pkgs.xorg.libxcb
          pkgs.freetype
          pkgs.dbus
          pkgs.qt6.qtwayland
          pkgs.qt6.full
          pkgs.qt6.qtbase
          pkgs.zstd
        ];
        buildInputs = with pkgs; [
          cmake
          gdb
          zstd
          qt6.qtbase
          qt6.full
          qt6.qtwayland
          qtcreator
          python312Packages.pip
          python312Full
          python312Packages.virtualenv # run virtualenv .
          python312Packages.pyusb # fixes the pyusb 'No backend available' when installed directly via pip

          libgcc
          makeWrapper
          bashInteractive
          glib
          libxkbcommon
          freetype
          binutils
          dbus
          coreutils
          libGL
          libGLU
          fontconfig
          xorg.libxcb


          # this is for the shellhook portion
          qt6.wrapQtAppsHook
          makeWrapper
          bashInteractive
        ];
        # set the environment variables that Qt apps expect
        shellHook = ''
          export QT_QPA_PLATFORM=wayland
          export LIBRARY_PATH=/usr/lib:/usr/lib64:$LIBRARY_PATH
          # export LD_LIBRARY_PATH=${pkgs.stdenv.cc.cc.lib}/lib/:/run/opengl-driver/lib/
          export QT_PLUGIN_PATH=${pkgs.qt6.qtbase}/${pkgs.qt6.qtbase.qtPluginPrefix}
          bashdir=$(mktemp -d)
          makeWrapper "$(type -p bash)" "$bashdir/bash" "''${qtWrapperArgs[@]}"
          exec "$bashdir/bash"
        '';
      };
    };
}

@@ -1,2 +1,2 @@
@echo off
.venv\Scripts\python.exe .\TagStudio\tagstudio.py --ui qt %*
.venv\Scripts\python.exe .\TagStudio\tag_studio.py --ui qt %*

@@ -493,7 +493,7 @@ class CliDriver:
            # print(f'Char Limit: {char_limit}, Len: {len(text)}')
        return char_limit

    def truncate_text(self, text: str) -> int:
    def truncate_text(self, text: str) -> str:
        """Returns a truncated string for displaying, calculated with `get_char_limit()`."""
        if len(text) > self.get_char_limit(text):
            # print(f'Char Limit: {self.get_char_limit(text)}, Len: {len(text)}')
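The change above is annotation-only: `truncate_text()` always returned a string, so the declared `-> int` was wrong and would mislead type checkers at every call site. A minimal standalone sketch of the corrected shape (the ellipsis logic is illustrative, not TagStudio's exact implementation):

```python
def truncate_text(text: str, char_limit: int = 20) -> str:
    """Return `text` cut to `char_limit` characters, with an ellipsis if truncated."""
    if len(text) > char_limit:
        return text[: char_limit - 3] + '...'
    return text

assert truncate_text('a very long piece of text') == 'a very long piece...'
```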
@@ -761,7 +761,7 @@ class CliDriver:
                offset = 1 if (index >= row_count) and (
                    row_number != row_count) else 0
            elif displayable % table_size != 0:
                if col_num > 1 and col_num <= displayable % table_size:
                if 1 < col_num <= displayable % table_size:
                    offset += col_num - 1
                elif col_num > 1 and col_num > displayable % table_size:
                    offset = displayable % table_size
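The rewrite to `1 < col_num <= displayable % table_size` uses Python's chained comparisons, which evaluate pairwise, evaluate `col_num` only once, and read like the underlying inequality. A quick check that the two forms agree:

```python
col_num = 3
table_size = 4
displayable = 10

old_style = col_num > 1 and col_num <= displayable % table_size
chained = 1 < col_num <= displayable % table_size
assert old_style == chained  # both False here, since 10 % 4 == 2
```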
@@ -1022,30 +1022,31 @@ class CliDriver:
        """
        was_executed:bool = False
        message:str = ''
        com_name = com[0].lower()

        # Backup Library =======================================================
        if (com[0].lower() == 'backup'):
        if com_name == 'backup':
            self.backup_library(display_message=False)
            was_executed = True
            message=f'{INFO} Backed up Library to disk.'
        # Create Collage =======================================================
        elif (com[0].lower() == 'collage'):
        elif com_name == 'collage':
            filename = self.create_collage()
            if filename:
                was_executed = True
                message = f'{INFO} Saved collage to \"{filename}\".'
        # Save Library =========================================================
        elif (com[0].lower() == 'save' or com[0].lower() == 'write' or com[0].lower() == 'w'):
        elif com_name in ('save', 'write', 'w'):
            self.save_library(display_message=False)
            was_executed = True
            message=f'{INFO} Library saved to disk.'
        # Toggle Debug =========================================================
        elif (com[0].lower() == 'toggle-debug'):
        elif com_name == 'toggle-debug':
            self.args.debug = not self.args.debug
            was_executed = True
            message=f'{INFO} Debug Mode Active.' if self.args.debug else f'{INFO} Debug Mode Deactivated.'
        # Toggle External Preview ==============================================
        elif (com[0].lower() == 'toggle-external-preview'):
        elif com_name == 'toggle-external-preview':
            self.args.external_preview = not self.args.external_preview
            if self.args.external_preview:
                self.init_external_preview()

@@ -1054,11 +1055,11 @@ class CliDriver:
            was_executed = True
            message=f'{INFO} External Preview Enabled.' if self.args.external_preview else f'{INFO} External Preview Disabled.'
        # Quit =================================================================
        elif com[0].lower() == 'quit' or com[0].lower() == 'q':
        elif com_name in ('quit', 'q'):
            self.exit(save=True, backup=False)
            was_executed = True
        # Quit without Saving ==================================================
        elif com[0].lower() == 'quit!' or com[0].lower() == 'q!':
        elif com_name in ('quit!', 'q!'):
            self.exit(save=False, backup=False)
            was_executed = True

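Both hunks above hoist `com[0].lower()` into a `com_name` variable and replace chains of `or`-ed equality tests with a single tuple-membership test. A toy dispatcher (not the real `CliDriver`) showing the pattern:

```python
def handle_command(com: list[str]) -> str:
    """Toy dispatcher showing the `com_name in (...)` alias pattern."""
    com_name = com[0].lower()                # lowercase once, reuse everywhere
    if com_name == 'backup':
        return 'backed up'
    elif com_name in ('save', 'write', 'w'):  # all aliases in one test
        return 'saved'
    elif com_name in ('quit', 'q'):
        return 'quit'
    return 'unknown'

assert handle_command(['W']) == 'saved'
assert handle_command(['quit']) == 'quit'
```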
@@ -1345,7 +1346,7 @@ class CliDriver:
                    # self.scr_library_home(clear_scr=False)
                # Add New Entries ==================================================
                elif ' '.join(com) == 'add new':
                    if self.is_new_file_count_init == False:
                    if not self.is_new_file_count_init:
                        print(
                            f'{INFO} Scanning for files in \'{self.lib.library_dir}\' (This may take a while)...')
                    # if not self.lib.files_not_in_library:

@@ -1390,7 +1391,7 @@ class CliDriver:
                    for unresolved in self.lib.missing_matches:
                        res = self.scr_choose_missing_match(
                            self.lib.get_entry_id_from_filepath(unresolved), clear_scr=False)
                        if res != None and int(res) >= 0:
                        if res is not None and int(res) >= 0:
                            clear()
                            print(
                                f'{INFO} Updated {self.lib.entries[self.lib.get_entry_id_from_filepath(unresolved)].path} -> {self.lib.missing_matches[unresolved][res]}')
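`res != None` becomes `res is not None`: PEP 8 prefers identity comparison for `None`, since `!=` dispatches to `__ne__`, which a class can override. A contrived demonstration:

```python
class Weird:
    def __ne__(self, other):   # a pathological __ne__ can make `!= None` lie
        return False

w = Weird()
assert not (w != None)   # the equality protocol is fooled
assert w is not None     # identity cannot be overridden
```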
@@ -1555,7 +1556,7 @@ class CliDriver:

        print(self.format_title(title))

        if len(self.filtered_entries) > 0:
        if self.filtered_entries:
            # entry = self.lib.get_entry_from_index(
            #     self.filtered_entries[index])
            entry = self.lib.get_entry(self.filtered_entries[index][1])

@@ -1580,7 +1581,7 @@ class CliDriver:

            self.print_fields(self.filtered_entries[index][1])
        else:
            if len(self.lib.entries) > 0:
            if self.lib.entries:
                print(self.format_h1('No Entry Results for Query', color=BRIGHT_RED_FG))
                self.set_external_preview_default()
            else:
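Both hunks swap `len(x) > 0` for a bare truthiness test; empty containers are falsy in Python, so the two forms are equivalent and the shorter one is the PEP 8 idiom:

```python
filtered_entries: list[tuple[int, int]] = []

# An empty list is falsy, so these tests are equivalent:
assert (len(filtered_entries) > 0) == bool(filtered_entries)

filtered_entries.append((0, 42))
if filtered_entries:          # idiomatic form used in the hunks
    first = filtered_entries[0]
assert first == (0, 42)
```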
@@ -2049,7 +2050,7 @@ class CliDriver:
                '<#> Quit', BRIGHT_CYAN_FG))
            print('> ', end='')

            com: list[str] = input().lstrip().rstrip().split(' ')
            com: list[str] = input().strip().split(' ')
            gc, message = self.global_commands(com)
            if gc:
                if message:

@@ -2057,6 +2058,7 @@ class CliDriver:
                    print(message)
                    clear_scr=False
            else:
                com_name = com[0].lower()

                try:
                    # # Quit =========================================================

@@ -2069,13 +2071,13 @@ class CliDriver:
                    # #     self.cleanup()
                    # sys.exit()
                    # Cancel =======================================================
                    if (com[0].lower() == 'cancel' or com[0].lower() == 'c' or com[0] == '0') and required==False:
                    if com_name in ('cancel', 'c', '0') and not required:
                        clear()
                        return -1
                    # Selection ====================================================
                    elif int(com[0]) > 0 and int(com[0]) <= len(choices):
                    elif com_name.isdigit() and 0 < int(com_name) <= len(choices):
                        clear()
                        return int(com[0]) - 1
                        return int(com_name) - 1
                    else:
                        # invalid_input = True
                        # print(self.format_h1(str='Please Enter a Valid Selection Number', color=BRIGHT_RED_FG))
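The selection branch now guards with `com_name.isdigit()` before calling `int()`. In the old code a non-numeric entry raised `ValueError` and leaned on the surrounding `try` for control flow; the guard makes the fall-through to `else` explicit. A hypothetical helper isolating the idea:

```python
choices = ['red', 'green', 'blue']

def select(com_name: str) -> int:
    """Return a 0-based index, or -1 for invalid input (hypothetical helper)."""
    if com_name.isdigit() and 0 < int(com_name) <= len(choices):
        return int(com_name) - 1  # isdigit() ran first, so int() cannot raise
    return -1

assert select('2') == 1
assert select('abc') == -1   # the old unguarded int(com[0]) raised ValueError here
assert select('0') == -1
```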
@@ -2554,7 +2556,7 @@ class CliDriver:
                f'Enter #{plural} Cancel', fg_color))
            print('> ', end='')

            com: list[int] = input().split(' ')
            com: list[str] = input().split(' ')
            selected_ids: list[int] = []
            try:
                for c in com:

@@ -2625,14 +2627,14 @@ class CliDriver:
                self.lib.update_entry_field(
                    entry_index, field_index, new_content.rstrip('\n').rstrip('\r'), 'replace')

    def scr_list_tags(self, query: str = '', tag_ids: list[int] = [], clear_scr=True) -> None:
    def scr_list_tags(self, query: str = '', tag_ids: list[int] = None, clear_scr=True) -> None:
        """A screen for listing out and performing CRUD operations on Library Tags."""
        # NOTE: While a screen that just displays the first 40 or so random tags on your screen
        # isn't really that useful, this is just a temporary measure to provide a launchpad
        # screen for necessary commands such as adding and editing tags.
        # A more useful screen presentation might look like a list of ranked occurrences, but
        # that can be figured out and implemented later.

        tag_ids = tag_ids or []
        title = f'{self.base_title} - Library \'{self.lib.library_dir}\''

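`tag_ids: list[int] = []` is the classic mutable-default-argument pitfall: the default list is created once at function definition and shared by every call that omits the argument. The fix (default `None`, then `tag_ids = tag_ids or []`) allocates a fresh list per call:

```python
def buggy(item: int, acc: list[int] = []) -> list[int]:
    acc.append(item)           # the single default list persists across calls
    return acc

def fixed(item: int, acc: list[int] | None = None) -> list[int]:
    acc = acc or []            # fresh list on every call, as in the hunk above
    acc.append(item)
    return acc

assert buggy(1) == [1]
assert buggy(2) == [1, 2]      # state leaked in from the first call
assert fixed(1) == [1]
assert fixed(2) == [2]         # calls stay independent
```

Note that `acc or []` also replaces an explicitly passed empty list; `if acc is None: acc = []` is the stricter variant when that distinction matters.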
@@ -2673,7 +2675,7 @@ class CliDriver:
                'Create Edit <#> Delete <#> Search <Query> Close/Done', BRIGHT_MAGENTA_FG))
            print('> ', end='')

            com: list[str] = input().lstrip().rstrip().split(' ')
            com: list[str] = input().strip().split(' ')
            gc, message = self.global_commands(com)
            if gc:
                if message:

@@ -2681,9 +2683,9 @@ class CliDriver:
                    print(message)
                    clear_scr=False
            else:

                com_name = com[0].lower()
                # Search Tags ==========================================================
                if (com[0].lower() == 'search' or com[0].lower() == 's'):
                if com_name in ('search', 's'):
                    if len(com) > 1:
                        new_query: str = ' '.join(com[1:])
                        # self.scr_list_tags(prev_scr, query=new_query,

@@ -2696,7 +2698,7 @@ class CliDriver:
                        tag_ids=self.lib.search_tags('')
                    # return
                # Edit Tag ===========================================================
                elif com[0].lower() == 'edit' or com[0].lower() == 'e':
                elif com_name in ('edit', 'e'):
                    if len(com) > 1:
                        try:
                            index = int(com[1]) - 1

@@ -2720,7 +2722,7 @@ class CliDriver:
                    # return

                # Create Tag ============================================================
                elif com[0].lower() == 'create' or com[0].lower() == 'mk':
                elif com_name in ('create', 'mk'):
                    tag = Tag(id=0, name='New Tag', shorthand='',
                              aliases=[], subtags_ids=[], color='')
                    self.scr_manage_tag(

@@ -2731,7 +2733,7 @@ class CliDriver:
                    # self.scr_list_tags(prev_scr, query=query, tag_ids=tag_ids)
                    # return
                # Delete Tag ===========================================================
                elif com[0].lower() == 'delete' or com[0].lower() == 'del':
                elif com_name in ('delete', 'del'):
                    if len(com) > 1:
                        try:

@@ -2757,7 +2759,7 @@ class CliDriver:
                    #                    tag_ids=tag_ids, clear_scr=False)
                    # return
                # Close View ===========================================================
                elif (com[0].lower() == 'close' or com[0].lower() == 'c' or com[0].lower() == 'done'):
                elif com_name in ('close', 'c', 'done'):
                    # prev_scr()
                    return
                # # Quit =================================================================

@@ -3192,7 +3194,7 @@ class CliDriver:

        selected: str = input()
        try:
            if int(selected) > 0 and int(selected) <= len(colors):
            if selected.isdigit() and 0 < int(selected) <= len(colors):
                selected = colors[int(selected)-1]
                return selected
        # except SystemExit:

@@ -5,25 +5,21 @@
"""The Library object and related methods for TagStudio."""

import datetime
from enum import Enum
import os
import traceback
from typing import Optional
import json
import glob
from pathlib import Path
# from typing_extensions import deprecated
import src.core.ts_core as ts_core
from src.core.utils.web import *
from src.core.utils.str import *
from src.core.utils.fs import *
import xml.etree.ElementTree as ET
import json
import logging
import os
import sys
import time
import logging
import traceback
import xml.etree.ElementTree as ET
from enum import Enum
import ujson

from tagstudio.src.core.json_typing import Json_Collation, Json_Entry, Json_Libary, Json_Tag
from src.core.json_typing import Json_Collation, Json_Entry, Json_Libary, Json_Tag
from src.core import ts_core
from src.core.utils.str import strip_punctuation
from src.core.utils.web import strip_web_protocol

TYPE = ['file', 'meta', 'alt', 'mask']
# RESULT_TYPE = Enum('Result', ['ENTRY', 'COLLATION', 'TAG_GROUP'])

@@ -136,7 +132,7 @@ class Entry:
        # if self.fields:
        #     if field_index != -1:
        #         logging.info(f'[LIBRARY] ADD TAG to E:{self.id}, F-DI:{field_id}, F-INDEX:{field_index}')
        field_index = -1 if field_index == None else field_index
        field_index = -1 if field_index is None else field_index
        for i, f in enumerate(self.fields):
            if library.get_field_attr(f, 'id') == field_id:
                field_index = i

@@ -330,6 +326,9 @@ class Library:
        # That filename can then be used to provide quick lookup to image metadata entries in the Library.
        # NOTE: On Windows, these strings are always lowercase.
        self.filename_to_entry_id_map: dict[str, int] = {}
        # A list of file extensions to be ignored by TagStudio.
        self.default_ext_blacklist: list = ['json', 'xmp', 'aae']
        self.ignored_extensions: list = self.default_ext_blacklist

        # Tags =================================================================
        # List of every Tag object (ts-v8).

@@ -619,6 +618,10 @@ class Library:
            self.verify_ts_folders()
            major, minor, patch = json_dump['ts-version'].split('.')

            # Load Extension Blacklist ---------------------------------
            if 'ignored_extensions' in json_dump.keys():
                self.ignored_extensions = json_dump['ignored_extensions']

            # Parse Tags ---------------------------------------------------
            if 'tags' in json_dump.keys():
                start_time = time.time()

@@ -631,45 +634,37 @@ class Library:
                    # Step 2: Create a Tag object and append it to the internal Tags list,
                    # then map that Tag's ID to its index in the Tags list.

                    id = 0
                    if 'id' in tag.keys():
                        id = tag['id']
                    id = int(tag.get('id', 0))

                    if int(id) >= self._next_tag_id:
                        self._next_tag_id = int(id) + 1
                    # Don't load tags with duplicate IDs
                    if id not in {t.id for t in self.tags}:
                        if id >= self._next_tag_id:
                            self._next_tag_id = id + 1

                    name = ''
                    if 'name' in tag.keys():
                        name = tag['name']
                    shorthand = ''
                    if 'shorthand' in tag.keys():
                        shorthand = tag['shorthand']
                    aliases = []
                    if 'aliases' in tag.keys():
                        aliases = tag['aliases']
                    subtag_ids = []
                    if 'subtag_ids' in tag.keys():
                        subtag_ids = tag['subtag_ids']
                    color = ''
                    if 'color' in tag.keys():
                        color = tag['color']
                        name = tag.get('name', '')
                        shorthand = tag.get('shorthand', '')
                        aliases = tag.get('aliases', [])
                        subtag_ids = tag.get('subtag_ids', [])
                        color = tag.get('color', '')

                    t = Tag(
                        id=int(id),
                        name=name,
                        shorthand=shorthand,
                        aliases=aliases,
                        subtags_ids=subtag_ids,
                        color=color
                    )
                        t = Tag(
                            id=id,
                            name=name,
                            shorthand=shorthand,
                            aliases=aliases,
                            subtags_ids=subtag_ids,
                            color=color
                        )

                    # NOTE: This does NOT use the add_tag_to_library() method!
                    # That method is only used for Tags added at runtime.
                    # This process uses the same inner methods, but waits until all of the
                    # Tags are registered in the Tags list before creating the Tag clusters.
                    self.tags.append(t)
                    self._map_tag_id_to_index(t, -1)
                    self._map_tag_strings_to_tag_id(t)
                        # NOTE: This does NOT use the add_tag_to_library() method!
                        # That method is only used for Tags added at runtime.
                        # This process uses the same inner methods, but waits until all of the
                        # Tags are registered in the Tags list before creating the Tag clusters.
                        self.tags.append(t)
                        self._map_tag_id_to_index(t, -1)
                        self._map_tag_strings_to_tag_id(t)
                    else:
                        logging.info(f'[LIBRARY]Skipping Tag with duplicate ID: {tag}')

                # Step 3: Map each Tag's subtags together now that all Tag objects in it.
                for t in self.tags:
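The hunk above collapses five `if key in tag.keys(): value = tag[key]` blocks into single `dict.get(key, default)` calls, which never raise `KeyError` for an absent key:

```python
tag = {'id': 3, 'name': 'Sunset'}

name = tag.get('name', '')        # present: returns the stored value
aliases = tag.get('aliases', [])  # absent: returns the default, no KeyError

assert name == 'Sunset'
assert aliases == []
```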
@@ -679,12 +674,11 @@ class Library:
                logging.info(f'[LIBRARY] Tags loaded in {(end_time - start_time):.3f} seconds')

            # Parse Entries ------------------------------------------------
            if 'entries' in json_dump.keys():
            if entries := json_dump.get('entries'):
                start_time = time.time()
                for entry in json_dump['entries']:
                for entry in entries:

                    id = 0
                    if 'id' in entry.keys():
                    if 'id' in entry:
                        id = int(entry['id'])
                        if id >= self._next_entry_id:
                            self._next_entry_id = id + 1
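`if entries := json_dump.get('entries'):` uses an assignment expression (PEP 572) to fetch and truth-test in one step, letting the loop reuse `entries` without a second lookup. One behavioral nuance worth noting:

```python
json_dump = {'entries': [{'id': 1}, {'id': 2}]}

if entries := json_dump.get('entries'):   # assign and truth-test at once
    ids = [e['id'] for e in entries]      # no second json_dump lookup
assert ids == [1, 2]

# Nuance: unlike `'entries' in json_dump`, the walrus form also skips an
# empty list, because [] is falsy just like the None returned for a miss.
assert not {'entries': []}.get('entries')
```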
@@ -693,16 +687,12 @@ class Library:
                        id = self._next_entry_id
                        self._next_entry_id += 1

                    filename = ''
                    if 'filename' in entry.keys():
                        filename = entry['filename']
                    e_path = ''
                    if 'path' in entry.keys():
                        e_path = entry['path']
                    filename = entry.get('filename', '')
                    e_path = entry.get('path', '')
                    fields = []
                    if 'fields' in entry.keys():
                    if 'fields' in entry:
                        # Cast JSON str keys to ints
                        for f in entry['fields']:
                        for f in fields:
                            f[int(list(f.keys())[0])
                              ] = f[list(f.keys())[0]]
                            del f[list(f.keys())[0]]

@@ -764,28 +754,17 @@ class Library:
                    # the internal Collations list, then map that
                    # Collation's ID to its index in the Collations list.

                    id = 0
                    if 'id' in collation.keys():
                        id = collation['id']
                    id = int(collation.get('id', 0))
                    if id >= self._next_collation_id:
                        self._next_collation_id = id + 1

                    if int(id) >= self._next_collation_id:
                        self._next_collation_id = int(id) + 1

                    title = ''
                    if 'title' in collation.keys():
                        title = collation['title']
                    e_ids_and_pages = ''
                    if 'e_ids_and_pages' in collation.keys():
                        e_ids_and_pages = collation['e_ids_and_pages']
                    sort_order = []
                    if 'sort_order' in collation.keys():
                        sort_order = collation['sort_order']
                    cover_id = []
                    if 'cover_id' in collation.keys():
                        cover_id = collation['cover_id']
                    title = collation.get('title', '')
                    e_ids_and_pages = collation.get('e_ids_and_pages', '')
                    sort_order = collation.get('sort_order', [])
                    cover_id = collation.get('cover_id', [])

                    c = Collation(
                        id=int(id),
                        id=id,
                        title=title,
                        e_ids_and_pages=e_ids_and_pages,
                        sort_order=sort_order,

@@ -852,7 +831,9 @@ class Library:
        Creates a JSON serialized string from the Library object.
        Used in saving the library to disk.
        """

        file_to_save: Json_Libary = {"ts-version": ts_core.VERSION,
                                     "ignored_extensions": [],
                                     "tags": [],
                                     "collations": [],
                                     "fields": [],

@@ -861,6 +842,9 @@ class Library:
                                     }

        print('[LIBRARY] Formatting Tags to JSON...')

        file_to_save['ignored_extensions'] = [i for i in self.ignored_extensions if i is not '']

        for tag in self.tags:
            file_to_save["tags"].append(tag.compressed_dict())

@@ -928,6 +912,7 @@ class Library:
        self.missing_files.clear()
        self.fixed_files.clear()
        self.filename_to_entry_id_map: dict[str, int] = {}
        self.ignored_extensions = self.default_ext_blacklist

        self.tags.clear()
        self._next_tag_id: int = 1000

@@ -953,7 +938,7 @@ class Library:
                # p = Path(os.path.normpath(f))
                if ('$RECYCLE.BIN' not in f and ts_core.TS_FOLDER_NAME not in f
                        and 'tagstudio_thumbs' not in f and not os.path.isdir(f)):
                    if os.path.splitext(f)[1][1:].lower() in ts_core.ALL_FILE_TYPES:
                    if os.path.splitext(f)[1][1:].lower() not in self.ignored_extensions:
                        self.dir_file_count += 1
                        file = str(os.path.relpath(f, self.library_dir))

@@ -1386,20 +1371,20 @@ class Library:
        query: str = query.strip().lower()
        query_words: list[str] = query.split(' ')
        all_tag_terms: list[str] = []
        only_untagged: bool = True if 'untagged' in query or 'no tags' in query else False
        only_empty: bool = True if 'empty' in query or 'no fields' in query else False
        only_missing: bool = True if 'missing' in query or 'no file' in query else False
        allow_adv: bool = True if 'filename:' in query_words else False
        tag_only: bool = True if 'tag_id:' in query_words else False
        only_untagged: bool = ('untagged' in query or 'no tags' in query)
        only_empty: bool = ('empty' in query or 'no fields' in query)
        only_missing: bool = ('missing' in query or 'no file' in query)
        allow_adv: bool = 'filename:' in query_words
        tag_only: bool = 'tag_id:' in query_words
        if allow_adv:
            query_words.remove('filename:')
        if tag_only:
            query_words.remove('tag_id:')
        # TODO: Expand this to allow for dynamic fields to work.
        only_no_author: bool = True if 'no author' in query or 'no artist' in query else False
        only_no_author: bool = ('no author' in query or 'no artist' in query)

        # Preprocess the Tag terms.
        if len(query_words) > 0:
        if query_words:
            for i, term in enumerate(query_words):
                for j, term in enumerate(query_words):
                    if query_words[i:j+1] and " ".join(query_words[i:j+1]) in self._tag_strings_to_id_map:
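The `True if cond else False` wrappers are redundant: `in` tests and comparisons already evaluate to `bool`, so the conditional expression can be dropped wholesale:

```python
query = 'no tags landscape'
query_words = query.split(' ')

wrapped = True if 'untagged' in query or 'no tags' in query else False
direct = 'untagged' in query or 'no tags' in query

assert direct is True     # `in` already yields a bool
assert wrapped == direct  # the conditional wrapper adds nothing

allow_adv = 'filename:' in query_words   # same simplification as the hunk
assert allow_adv is False
```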
@@ -1419,101 +1404,103 @@ class Library:
        # non_entry_count = 0
        # Iterate over all Entries =============================================================
        for entry in self.entries:
            allowed_ext: bool = os.path.splitext(entry.filename)[1][1:].lower() not in self.ignored_extensions
            # try:
            # entry: Entry = self.entries[self.file_to_library_index_map[self._source_filenames[i]]]
            # print(f'{entry}')

            # If the entry has tags of any kind, append them to this main tag list.
            entry_tags: list[int] = []
            entry_authors: list[str] = []
            if entry.fields:
                for field in entry.fields:
                    field_id = list(field.keys())[0]
                    if self.get_field_obj(field_id)['type'] == 'tag_box':
                        entry_tags.extend(field[field_id])
                    if self.get_field_obj(field_id)['name'] == 'Author':
                        entry_authors.extend(field[field_id])
                    if self.get_field_obj(field_id)['name'] == 'Artist':
                        entry_authors.extend(field[field_id])
            if allowed_ext:
                # If the entry has tags of any kind, append them to this main tag list.
                entry_tags: list[int] = []
                entry_authors: list[str] = []
                if entry.fields:
                    for field in entry.fields:
                        field_id = list(field.keys())[0]
                        if self.get_field_obj(field_id)['type'] == 'tag_box':
                            entry_tags.extend(field[field_id])
                        if self.get_field_obj(field_id)['name'] == 'Author':
                            entry_authors.extend(field[field_id])
                        if self.get_field_obj(field_id)['name'] == 'Artist':
                            entry_authors.extend(field[field_id])

            # print(f'Entry Tags: {entry_tags}')
                # print(f'Entry Tags: {entry_tags}')

            # Add Entries from special flags -------------------------------
            # TODO: Come up with a more user-resistent way to 'archived' and 'favorite' tags.
            if only_untagged:
                if not entry_tags:
                    results.append((ItemType.ENTRY, entry.id))
            elif only_no_author:
                if not entry_authors:
                    results.append((ItemType.ENTRY, entry.id))
            elif only_empty:
                if not entry.fields:
                    results.append((ItemType.ENTRY, entry.id))
            elif only_missing:
                if os.path.normpath(f'{self.library_dir}/{entry.path}/{entry.filename}') in self.missing_files:
                    results.append((ItemType.ENTRY, entry.id))

            # elif query == "archived":
            #     if entry.tags and self._tag_names_to_tag_id_map[self.archived_word.lower()][0] in entry.tags:
            #         self.filtered_file_list.append(file)
            #         pb.value = len(self.filtered_file_list)
            # elif query in entry.path.lower():

            # NOTE: This searches path and filenames.
            if allow_adv:
                if [q for q in query_words if (q in entry.path.lower())]:
                    results.append((ItemType.ENTRY, entry.id))
                elif [q for q in query_words if (q in entry.filename.lower())]:
                    results.append((ItemType.ENTRY, entry.id))
            elif tag_only:
                if entry.has_tag(self, int(query_words[0])):
                    results.append((ItemType.ENTRY, entry.id))

            # elif query in entry.filename.lower():
            #     self.filtered_entries.append(index)
            elif entry_tags:
                # For each verified, extracted Tag term.
                failure_to_union_terms = False
                for term in all_tag_terms:
                    # If the term from the previous loop was already verified:
                    if not failure_to_union_terms:
                        cluster: set = set()
                        # Add the immediate associated Tags to the set (ex. Name, Alias hits)
                        # Since this term could technically map to multiple IDs, iterate over it
                        # (You're 99.9999999% likely to just get 1 item)
                        for id in self._tag_strings_to_id_map[term]:
                            cluster.add(id)
                            cluster = cluster.union(
                                set(self.get_tag_cluster(id)))
                        # print(f'Full Cluster: {cluster}')
                        # For each of the Tag IDs in the term's ID cluster:
                        for t in cluster:
                            # Assume that this ID from the cluster is not in the Entry.
                            # Wait to see if proven wrong.
                            failure_to_union_terms = True
                            # If the ID actually is in the Entry,
                            if t in entry_tags:
                                # There wasn't a failure to find one of the term's cluster IDs in the Entry.
                                # There is also no more need to keep checking the rest of the terms in the cluster.
                                failure_to_union_terms = False
                                # print(f'FOUND MATCH: {t}')
                                break
                            # print(f'\tFailure to Match: {t}')
                # If there even were tag terms to search through AND they all match an entry
                if all_tag_terms and not failure_to_union_terms:
                    # self.filter_entries.append()
                    # self.filtered_file_list.append(file)
                    # results.append((SearchItemType.ENTRY, entry.id))
                    added = False
                    for f in entry.fields:
                        if self.get_field_attr(f, 'type') == 'collation':
                            if (self.get_field_attr(f, 'content') not in collations_added):
                                results.append((ItemType.COLLATION, self.get_field_attr(f, 'content')))
                                collations_added.append(self.get_field_attr(f, 'content'))
                                added = True

                    if not added:
                # Add Entries from special flags -------------------------------
                # TODO: Come up with a more user-resistent way to 'archived' and 'favorite' tags.
                if only_untagged:
                    if not entry_tags:
                        results.append((ItemType.ENTRY, entry.id))
                elif only_no_author:
                    if not entry_authors:
                        results.append((ItemType.ENTRY, entry.id))
                elif only_empty:
                    if not entry.fields:
                        results.append((ItemType.ENTRY, entry.id))
                elif only_missing:
                    if os.path.normpath(f'{self.library_dir}/{entry.path}/{entry.filename}') in self.missing_files:
                        results.append((ItemType.ENTRY, entry.id))

                # elif query == "archived":
                #     if entry.tags and self._tag_names_to_tag_id_map[self.archived_word.lower()][0] in entry.tags:
                #         self.filtered_file_list.append(file)
                #         pb.value = len(self.filtered_file_list)
                # elif query in entry.path.lower():

                # NOTE: This searches path and filenames.
                if allow_adv:
                    if [q for q in query_words if (q in entry.path.lower())]:
                        results.append((ItemType.ENTRY, entry.id))
                    elif [q for q in query_words if (q in entry.filename.lower())]:
                        results.append((ItemType.ENTRY, entry.id))
                elif tag_only:
                    if entry.has_tag(self, int(query_words[0])):
                        results.append((ItemType.ENTRY, entry.id))

                # elif query in entry.filename.lower():
                #     self.filtered_entries.append(index)
                elif entry_tags:
                    # For each verified, extracted Tag term.
                    failure_to_union_terms = False
                    for term in all_tag_terms:
                        # If the term from the previous loop was already verified:
                        if not failure_to_union_terms:
                            cluster: set = set()
                            # Add the immediate associated Tags to the set (ex. Name, Alias hits)
                            # Since this term could technically map to multiple IDs, iterate over it
                            # (You're 99.9999999% likely to just get 1 item)
                            for id in self._tag_strings_to_id_map[term]:
                                cluster.add(id)
                                cluster = cluster.union(
                                    set(self.get_tag_cluster(id)))
                            # print(f'Full Cluster: {cluster}')
                            # For each of the Tag IDs in the term's ID cluster:
                            for t in cluster:
                                # Assume that this ID from the cluster is not in the Entry.
                                # Wait to see if proven wrong.
                                failure_to_union_terms = True
                                # If the ID actually is in the Entry,
                                if t in entry_tags:
                                    # There wasn't a failure to find one of the term's cluster IDs in the Entry.
                                    # There is also no more need to keep checking the rest of the terms in the cluster.
                                    failure_to_union_terms = False
                                    # print(f'FOUND MATCH: {t}')
                                    break
                                # print(f'\tFailure to Match: {t}')
                    # If there even were tag terms to search through AND they all match an entry
                    if all_tag_terms and not failure_to_union_terms:
                        # self.filter_entries.append()
                        # self.filtered_file_list.append(file)
                        # results.append((SearchItemType.ENTRY, entry.id))
                        added = False
                        for f in entry.fields:
                            if self.get_field_attr(f, 'type') == 'collation':
                                if (self.get_field_attr(f, 'content') not in collations_added):
                                    results.append((ItemType.COLLATION, self.get_field_attr(f, 'content')))
                                    collations_added.append(self.get_field_attr(f, 'content'))
                                    added = True

                        if not added:
                            results.append((ItemType.ENTRY, entry.id))

        # sys.stdout.write(
        #     f'\r[INFO][FILTER]: {len(self.filtered_file_list)} matches found')
@@ -1539,21 +1526,23 @@ class Library:

        for entry in self.entries:
            added = False
            for f in entry.fields:
                if self.get_field_attr(f, 'type') == 'collation':
                    if (self.get_field_attr(f, 'content') not in collations_added):
                        results.append((ItemType.COLLATION, self.get_field_attr(f, 'content')))
                        collations_added.append(self.get_field_attr(f, 'content'))
                        added = True
            allowed_ext: bool = os.path.splitext(entry.filename)[1][1:].lower() not in self.ignored_extensions
            if allowed_ext:
                for f in entry.fields:
                    if self.get_field_attr(f, 'type') == 'collation':
                        if (self.get_field_attr(f, 'content') not in collations_added):
                            results.append((ItemType.COLLATION, self.get_field_attr(f, 'content')))
                            collations_added.append(self.get_field_attr(f, 'content'))
                            added = True

            if not added:
                results.append((ItemType.ENTRY, entry.id))
                if not added:
                    results.append((ItemType.ENTRY, entry.id))
        # for file in self._source_filenames:
        #     self.filtered_file_list.append(file)
        results.reverse()
        return results

    def search_tags(self, query: str, include_cluster=False, ignore_builtin=False, threshold: int = 1, context: list[str] = []) -> list[int]:
    def search_tags(self, query: str, include_cluster=False, ignore_builtin=False, threshold: int = 1, context: list[str] = None) -> list[int]:
        """Returns a list of Tag IDs returned from a string query."""
        # tag_ids: list[int] = []
        # if query:

@@ -1646,7 +1635,6 @@ class Library:

        # Contextual Weighing
        if context and ((len(id_weights) > 1 and len(priority_ids) > 1) or (len(priority_ids) > 1)):
            context_ids: list[int] = []
            context_strings: list[str] = [s.replace(' ', '').replace('_', '').replace('-', '').replace(
                "'", '').replace('(', '').replace(')', '').replace('[', '').replace(']', '').lower() for s in context]
            for term in context:

@@ -1820,16 +1808,16 @@ class Library:

        # Step [3/7]:
        # Remove ID -> cluster reference.
        if tag_id in self._tag_id_to_cluster_map.keys():
        if tag_id in self._tag_id_to_cluster_map:
            del self._tag_id_to_cluster_map[tag.id]
        # Remove mentions of this ID in all clusters.
        for key in self._tag_id_to_cluster_map.keys():
            if tag_id in self._tag_id_to_cluster_map[key]:
                self._tag_id_to_cluster_map[key].remove(tag.id)
        for key, values in self._tag_id_to_cluster_map.items():
            if tag_id in values:
                values.remove(tag.id)

        # Step [4/7]:
        # Remove mapping of this ID to its index in the tags list.
        if tag.id in self._tag_id_to_index_map.keys():
        if tag.id in self._tag_id_to_index_map:
            del self._tag_id_to_index_map[tag.id]

        # Step [5/7]:
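Two idioms in this hunk: membership tests drop the redundant `.keys()` call (the `in` operator on a dict already consults its keys), and the key-then-index loop becomes `.items()`, which yields key/value pairs directly:

```python
tag_id = 2
cluster_map = {1: [2, 3], 4: [5, 2], 6: [7]}

# Membership: `in` already consults the keys, no .keys() needed.
assert 1 in cluster_map

# Iterate pairs once instead of re-indexing by key on every pass.
# (Mutating the value lists is safe; the dict itself is unchanged.)
for key, values in cluster_map.items():
    if tag_id in values:
        values.remove(tag_id)

assert cluster_map == {1: [3], 4: [5], 6: [7]}
```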
@@ -1908,7 +1896,7 @@ class Library:
        if data:

            # Add a Title Field if the data doesn't already exist.
            if "title" in data.keys() and data["title"]:
            if data.get("title"):
                field_id = 0 # Title Field ID
                if not self.does_field_content_exist(entry_id, field_id, data['title']):
                    self.add_field_to_entry(entry_id, field_id)

@@ -1916,7 +1904,7 @@ class Library:
                        entry_id, -1, data["title"], 'replace')

            # Add an Author Field if the data doesn't already exist.
            if "author" in data.keys() and data["author"]:
            if data.get("author"):
                field_id = 1 # Author Field ID
                if not self.does_field_content_exist(entry_id, field_id, data['author']):
                    self.add_field_to_entry(entry_id, field_id)

@@ -1924,7 +1912,7 @@ class Library:
                        entry_id, -1, data["author"], 'replace')

            # Add an Artist Field if the data doesn't already exist.
            if "artist" in data.keys() and data["artist"]:
            if data.get("artist"):
                field_id = 2 # Artist Field ID
                if not self.does_field_content_exist(entry_id, field_id, data['artist']):
                    self.add_field_to_entry(entry_id, field_id)

@@ -1932,7 +1920,7 @@ class Library:
                        entry_id, -1, data["artist"], 'replace')

            # Add a Date Published Field if the data doesn't already exist.
            if "date_published" in data.keys() and data["date_published"]:
            if data.get("date_published"):
                field_id = 14 # Date Published Field ID
                date = str(datetime.datetime.strptime(
                    data["date_published"], '%Y-%m-%d %H:%M:%S'))

@@ -1942,7 +1930,7 @@ class Library:
                self.update_entry_field(entry_id, -1, date, 'replace')

            # Process String Tags if the data doesn't already exist.
            if "tags" in data.keys() and data["tags"]:
            if data.get("tags"):
                tags_field_id = 6 # Tags Field ID
                content_tags_field_id = 7 # Content Tags Field ID
                meta_tags_field_id = 8 # Meta Tags Field ID

@@ -1979,7 +1967,7 @@ class Library:
                matching: list[int] = self.search_tags(
                    tag.replace('_', ' ').replace('-', ' '), include_cluster=False, ignore_builtin=True, threshold=2, context=tags)
                priority_field_index = -1
                if len(matching) > 0:
                if matching:

                    # NOTE: The following commented-out code enables the ability
                    # to prefer an existing built-in tag_box field to add to
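The repeated `if "title" in data.keys() and data["title"]:` pattern collapses to `if data.get("title"):`, a single lookup that covers both the missing-key case (`.get()` returns `None`) and the empty-value case, since both are falsy:

```python
data = {'title': 'Field Notes', 'author': ''}

assert data.get('title')         # key present with a truthy value
assert not data.get('author')    # present but empty string: skipped
assert not data.get('artist')    # absent: .get() returns None, also skipped
```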
@@ -4,18 +4,12 @@

"""The core classes and methods of TagStudio."""

import os
from types import FunctionType
# from typing import Dict, Optional, TypedDict, List
import json
from pathlib import Path
import traceback
# import requests
# from bs4 import BeautifulSoup as bs
from src.core.library import *
from src.core.field_template import FieldTemplate
import os

VERSION: str = '9.1.0' # Major.Minor.Patch
from src.core.library import Entry, Library

VERSION: str = '9.2.0' # Major.Minor.Patch
VERSION_BRANCH: str = 'Alpha' # 'Alpha', 'Beta', or '' for Full Release

# The folder & file names where TagStudio keeps its data relative to a library.

@@ -37,11 +31,11 @@ SPREADSHEET_TYPES: list[str] = ['csv', 'xls', 'xlsx', 'numbers', 'ods']
PRESENTATION_TYPES: list[str] = ['ppt', 'pptx', 'key', 'odp']
ARCHIVE_TYPES: list[str] = ['zip', 'rar', 'tar', 'tar.gz', 'tgz', '7z']
PROGRAM_TYPES: list[str] = ['exe', 'app']
SHORTCUT_TYPES: list[str] = ['lnk', 'desktop']
SHORTCUT_TYPES: list[str] = ['lnk', 'desktop', 'url']

ALL_FILE_TYPES: list[str] = IMAGE_TYPES + VIDEO_TYPES + AUDIO_TYPES + \
    TEXT_TYPES + SPREADSHEET_TYPES + PRESENTATION_TYPES + \
    ARCHIVE_TYPES + PROGRAM_TYPES
    ARCHIVE_TYPES + PROGRAM_TYPES + SHORTCUT_TYPES

BOX_FIELDS = ['tag_box', 'text_box']
TEXT_FIELDS = ['text_line', 'text_box']

@@ -143,12 +137,11 @@ class TagStudioCore:
    # # #         print("Could not resolve URL.")
    # #     pass

    def match_conditions(self, entry_id: int) -> str:
    def match_conditions(self, entry_id: int) -> None:
        """Matches defined conditions against a file to add Entry data."""

        cond_file = os.path.normpath(f'{self.lib.library_dir}/{TS_FOLDER_NAME}/conditions.json')
        # TODO: Make this stored somewhere better instead of temporarily in this JSON file.
        json_dump = {}
        entry: Entry = self.lib.get_entry(entry_id)
        try:
            if os.path.isfile(cond_file):

@@ -161,8 +154,8 @@ class TagStudioCore:
                        match = True
                        break
                if match:
                    if 'fields' in c.keys() and c['fields']:
                        for field in c['fields']:
                    if fields := c.get('fields'):
                        for field in fields:

                            field_id = self.lib.get_field_attr(
                                field, 'id')

@@ -2,9 +2,6 @@
# Licensed under the GPL-3.0 License.
# Created for TagStudio: https://github.com/CyanVoxel/TagStudio

import os


def clean_folder_name(folder_name: str) -> str:
    cleaned_name = folder_name
    invalid_chars = "<>:\"/\\|?*."

@@ -4,9 +4,8 @@

"""PySide6 port of the widgets/layouts/flowlayout example from Qt v6.x"""

import sys
from PySide6.QtCore import Qt, QMargins, QPoint, QRect, QSize
from PySide6.QtWidgets import QApplication, QLayout, QPushButton, QSizePolicy, QWidget
from PySide6.QtWidgets import QLayout, QSizePolicy, QWidget


# class Window(QWidget):

@@ -12,23 +12,14 @@
# Licensed under the GPL-3.0 License.
# Created for TagStudio: https://github.com/CyanVoxel/TagStudio

from re import S
import time
from typing import Optional
from PySide6.QtCore import (QCoreApplication, QDate, QDateTime, QLocale,
                            QMetaObject, QObject, QPoint, QRect,
                            QSize, QTime, QUrl, Qt)
from PySide6.QtGui import (QBrush, QColor, QConicalGradient, QCursor,
                           QFont, QFontDatabase, QGradient, QIcon,
                           QImage, QKeySequence, QLinearGradient, QPainter,
                           QPalette, QPixmap, QRadialGradient, QTransform, QAction)
from PySide6.QtWidgets import (QApplication, QComboBox, QFrame, QGridLayout,
from PySide6.QtCore import (QCoreApplication, QMetaObject, QRect,
                            QSize, Qt)
from PySide6.QtGui import (QFont, QAction)
from PySide6.QtWidgets import (QComboBox, QFrame, QGridLayout,
                               QHBoxLayout, QVBoxLayout, QLayout, QLineEdit, QMainWindow,
                               QMenuBar, QPushButton, QScrollArea, QSizePolicy,
                               QStatusBar, QWidget, QSplitter, QMenu)
from src.qt.pagination import Pagination
# from src.qt.qtacrylic.qtacrylic import WindowEffect
# from qframelesswindow import FramelessMainWindow, StandardTitleBar


class Ui_MainWindow(QMainWindow):

@@ -5,10 +5,10 @@
"""A pagination widget created for TagStudio."""
# I never want to see this code again.

from PySide6 import QtCore
from PySide6.QtGui import *
from PySide6.QtWidgets import *
from PySide6.QtCore import QFile, QObject, QThread, Signal, QRunnable, Qt, QThreadPool, QSize, QEvent, QMimeData
from PySide6.QtCore import QObject, Signal, QSize
from PySide6.QtGui import QIntValidator
from PySide6.QtWidgets import QWidget, QHBoxLayout, QPushButton, QLabel, QLineEdit, QSizePolicy


# class NumberEdit(QLineEdit):
#     def __init__(self, parent=None) -> None:

@@ -1,6 +1,6 @@
# Resource object code (Python 3)
# Created by: object code
# Created by: The Resource Compiler for Qt version 6.5.1
# Created by: The Resource Compiler for Qt version 6.6.3
# WARNING! All changes made in this file will be lost!

from PySide6 import QtCore

File diff suppressed because it is too large

@@ -9,7 +9,6 @@ from src.cli.ts_cli import CliDriver
from src.qt.ts_qt import QtDriver
import argparse
import traceback
# import ctypes


def main():