Compare commits

170 Commits (SHA1 list):

af7633183c, 1280b2f855, 2e0b1f5951, 548d7491b8, ad99115544, 52919cbc49, 7f2dc13c31, 592652cff1,
6a1724695e, 037210756e, 4e78d83092, 0e6331fa6a, c98c5df63c, 0981c74da9, 31518799f6, e8b4b9b48e,
cd06272b38, c5ab4352e3, 0da4a6b70a, 11c5f3387c, 3ed0171e17, c7b38b0d70, caf0039de4, 2637e1e42c,
d0a1673965, 07e5d23f72, fb4fdb6857, d642a13b6e, 8967031f91, 89caada4cc, b3616269bc, 4fa22813ce,
3ee3a0dce0, 1226721bc0, a76eae0cc7, 53cf2a6f48, fe69530b38, bf83c1c394, 84ce43ab93, 5e0765ee24,
6edf9398b7, 30bf9c1bcb, 0b59f3cc7a, 2544fad9ce, e579ef5834, 543e9766a1, 4c3611aba7, dadc14563a,
c1eda7305b, 2f4e236350, b858936c68, 78ce30283e, f85fbb86af, ed22105ec8, 7c5af588c7, 2690a285d9,
bb60a570d0, ca0ab2d7d1, 38bae7fb46, 14466c3ff8, fe824f9fb4, ef5981b473, 7d1ee70cf6, 7c72d99619,
b32887a6d8, 37a197e7f1, 74cb3d2c54, d19abcabc7, f8ae6609c7, cbd39ff161, f8905a176c, 847288e91f,
446d9d5217, 3a7a1659f0, bc23006a34, 6090995eba, 60db747d6d, a7a4141f58, 2b04cbe239, 765cc061c1,
80319385f0, 29dd906fe0, d5dc4028c3, 0df049d453, 0bd7c1f685, 2f08ecabbf, 12af1c80dc, a52b6e0a55,
a586cf65e2, e2e7882bfa, 4f9c2b9d5f, 5203bcf1ea, f1e3bc8559, b97ca6f064, d1ea9874da, 3cd3f87d68,
582937b866, 2b8240c156, abf4b7ac89, 9c49f83c16, 3a625ed0ee, 2cfbf4bb90, 5767533668, 24798f19ca,
26f8249187, dcefa564da, edd35dccea, ea527ea60c, fd5e1db22b, 39e23faf7f, de285b531a, 0a29a592f9,
e045b1d3b5, 280e5fa861, 472d3495b5, 2778ac6870, 743a0a8ac9, 694712ed2e, ea3b4f1790, da68818d4f,
db6a3b53c5, 82b089498e, 948b0dd5e7, 4acc0b51b1, a626b738a9, 7119844313, 5763f57830, 70e8ceecce,
acbe1ac692, 99bca2c467, b74ed1f30e, 8082ab78a1, c69076f517, 648ab001b6, 447034046e, 0770ac0bb4,
aa2fbd4f70, 58c8447531, bcca43d774, e9ccfe7ad2, 6c2637ad34, 7183d05dd6, b45ca85cd3, 4ca45ebc73,
6902768fed, 3f9f2ceaac, 2a248bd249, c559a6bafb, 19d7e9b5ed, 3e5a5accf7, 424c91945a, c657dc564e,
208f002284, 084ecc01e0, 08cb994d8d, 67f1a6688d, efb7968e93, fe7c7db004, 79d1ccae9a, 6e69af4aa8,
d500b7d473, ef599a1aad, 2d197134f1, 717080a009, 19197c71ff, 051b93f2d8, e04b3598b8, b88e0fe564,
060e3b4afe, cd07267475
.gitignore (vendored)
@@ -3,3 +3,4 @@
 **/*.egg-info
 .mypy_cache/
 **/.env
+.coverage
README.md (new file)
@@ -0,0 +1,123 @@
# CoreLibs for Python

> [!warning]
> This is pre-production; locations of methods and names of paths can change
>
> This will be split up into one module per file, and this will be just a collection holder

This is a pip package that can be installed into any project and covers the following parts:

- logging update with exception logs
- requests wrapper for easier auth pass on access
- dict fingerprinting
- sending email
- jmespath search
- json helpers for content replace and output
- dump outputs for data for debugging
- progress printing
- string formatting, time creation, byte formatting
- Enum base class
- SQLite simple IO class
- Symmetric encryption

## Current list

- config_handling: simple INI config file data loader with check/convert/etc
- csv_interface: csv dict writer/reader helper
- debug_handling: various debug helpers like data dumper, timer, utilization, etc
- db_handling: SQLite interface class
- encyption_handling: symmetric encryption
- email_handling: simple email sending
- file_handling: crc handling for file content and file names, progress bar
- json_handling: jmespath support and json date support, replace content in dict with json paths
- iterator_handling: list and dictionary handling support (search, fingerprinting, etc)
- logging_handling: extend log and also error message handling
- requests_handling: requests wrapper for better calls with auth headers
- script_handling: pid lock file handling, abort timer
- string_handling: byte format, datetime format, datetime compare, hashing, string formats for numbers, double byte string format, etc
- var_handling: var type checkers, enum base class

## Unfinished

- csv_handling/csv_interface: the CSV DictWriter interface is only implemented in a very basic way
- script_handling/script_helpers: no idea if this is needed; tests are written but not finished

## UV setup

uv must be [installed](https://docs.astral.sh/uv/getting-started/installation/)

## How to publish

Have the following setup in `pyproject.toml`

```toml
[[tool.uv.index]]
name = "opj-pypi"
url = "https://git.egplusww.jp/api/packages/PyPI/pypi/simple/"
publish-url = "https://git.egplusww.jp/api/packages/PyPI/pypi"
explicit = true
```

```sh
uv build
uv publish --index opj-pypi --token <gitea token>
```

## Use package

We must set the full index URL here because we run with "--no-project"

```sh
uv run --with corelibs --index opj-pypi=https://git.egplusww.jp/api/packages/PyPI/pypi/simple/ --no-project -- python -c "import corelibs"
```

### Python tests

All Python tests are in the tests/ folder. They are structured by the source folder layout.

Run them with

```sh
uv run pytest
```

Get a coverage report

```sh
uv run pytest --cov=corelibs
uv run pytest --cov=corelibs --cov-report=term-missing
```

### Other tests

Usage and run tests are located in the test-run folder; run them as below

```sh
uv run test-run/<script>
```

## How to install in another project

This will also add the index entry

```sh
uv add corelibs --index opj-pypi=https://git.egplusww.jp/api/packages/PyPI/pypi/simple/
```
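
For reference, a successful `uv add ... --index` records the index in the consuming project's `pyproject.toml`, roughly like the publish setup above (a sketch; uv may write additional fields):

```toml
[[tool.uv.index]]
name = "opj-pypi"
url = "https://git.egplusww.jp/api/packages/PyPI/pypi/simple/"
```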

## Python venv setup

After clone, run the command below to install all dependencies

```sh
uv sync
```

## NOTE on TLS problems

> [!warning] TLS problems with Netskope

If the Netskope service is running, all uv runs will fail unless either --native-tls is set or the environment variable SSL_CERT_FILE is set, see below

```sh
export SSL_CERT_FILE='/Library/Application Support/Netskope/STAgent/data/nscacert_combined.pem'
```
ReadMe.md (deleted)
@@ -1,88 +0,0 @@
# CoreLibs for Python

This is a pip package that can be installed into any project and covers the following parts

- logging update with exception logs
- requests wrapper for easier auth pass on access
- dict fingerprinting
- jmespath search
- dump outputs for data
- progress printing
- string formatting, time creation, byte formatting

## Current list

- csv_handling: csv dict writer helper
- debug_handling: various debug helpers like data dumper, timer, utilization, etc
- file_handling: crc handling for file content and file names, progress bar
- json_handling: jmespath support and json date support
- list_dict_handling: list and dictionary handling support (search, fingerprinting, etc)
- logging_handling: extend log and also error message handling
- requests_handling: requests wrapper for better calls with auth headers
- script_handling: pid lock file handling, abort timer
- string_handling: byte format, datetime format, hashing, string formats for numbers, double byte string format, etc

## UV setup

uv must be [installed](https://docs.astral.sh/uv/getting-started/installation/)

## How to publish

Have the following setup in `pyproject.toml`

```toml
[[tool.uv.index]]
name = "egra-gitea"
url = "https://git.egplusww.jp/api/packages/PyPI/pypi/simple/"
publish-url = "https://git.egplusww.jp/api/packages/PyPI/pypi"
explicit = true
```

```sh
uv build --native-tls
uv publish --index egra-gitea --token <gitea token> --native-tls
```

## Test package

We must set the full index URL here because we run with "--no-project"

```sh
uv run --with corelibs --index egra-gitea=https://git.egplusww.jp/api/packages/PyPI/pypi/simple/ --no-project --native-tls -- python -c "import corelibs"
```

### Other tests

In the test folder other tests are located.

At the moment only a small test for the "progress" and the "double byte string format" module is set

```sh
uv run --native-tls tests/progress/progress_test.py
```

```sh
uv run --native-tls tests/double_byte_string_format/double_byte_string_format.py
```

## How to install in another project

This will also add the index entry

```sh
uv add corelibs --index egra-gitea=https://git.egplusww.jp/api/packages/PyPI/pypi/simple/ --native-tls
```

## Python venv setup

In the folder where the script will be located

```sh
uv venv --python 3.13
```

Install all needed dependencies

```sh
uv sync
```
SECURITY.md (new file)
@@ -0,0 +1,11 @@
# Security Policy

This software follows the [Semver 2.0 scheme](https://semver.org/).

## Supported Versions

Only the latest version is supported.

## Reporting a Vulnerability

Open a ticket to report a security problem.
ToDo.md
@@ -1,4 +1,7 @@
 # ToDo list

-- stub files .pyi
-- fix all remaining check errors
+- [x] stub files .pyi
+- [ ] Add tests for all, we need 100% test coverage
+- [x] Log: add custom format for "stack_correct" if set, this will override the normal stack block
+- [ ] Log: add rotate for size based
+- [ ] All folders and file names need to be revisited for naming and content collection
pyproject.toml
@@ -1,12 +1,14 @@
 # MARK: Project info
 [project]
 name = "corelibs"
-version = "0.5.0"
+version = "0.36.0"
 description = "Collection of utils for Python scripts"
-readme = "ReadMe.md"
+readme = "README.md"
 requires-python = ">=3.13"
 dependencies = [
     "cryptography>=46.0.3",
     "jmespath>=1.0.1",
     "jsonpath-ng>=1.7.0",
     "psutil>=7.0.0",
     "requests>=2.32.4",
 ]
@@ -15,16 +17,22 @@ dependencies = [

 # MARK: build target
 [[tool.uv.index]]
-name = "egra-gitea"
+name = "opj-pypi"
 url = "https://git.egplusww.jp/api/packages/PyPI/pypi/simple/"
 publish-url = "https://git.egplusww.jp/api/packages/PyPI/pypi"
 explicit = true

 # MARK: build system
 [build-system]
 requires = ["hatchling"]
 build-backend = "hatchling.build"

 [dependency-groups]
 dev = [
     "deepdiff>=8.6.1",
     "pytest>=8.4.1",
     "pytest-cov>=6.2.1",
 ]

 # MARK: Python linting
 [tool.pyright]
 typeCheckingMode = "strict"
@@ -47,6 +55,38 @@ notes = ["FIXME", "TODO"]

notes-rgx = '(FIXME|TODO)(\((TTD-|#)\[0-9]+\))'
[tool.flake8]
max-line-length = 120
ignore = [
    "E741",  # ignore ambiguous variable name
    "W504"   # line break occurred after a binary operator [wrongly triggered by "or" in if]
]
[tool.pylint.MASTER]
# this is for the tests/etc folders
init-hook='import sys; sys.path.append("src/")'

# MARK: Testing
[tool.pytest.ini_options]
testpaths = [
    "tests",
]

[tool.coverage.run]
omit = [
    "*/tests/*",
    "*/test_*.py",
    "*/__init__.py"
]

[tool.coverage.report]
exclude_lines = [
    "pragma: no cover",
    "def __repr__",
    "def __str__",
    "raise AssertionError",
    "raise NotImplementedError",
    "if __name__ == .__main__.:"
]
exclude_also = [
    "def __.*__\\(",
    "def __.*\\(",
    "def _.*\\(",
]
src/corelibs/check_handling/regex_constants.py (new file)
@@ -0,0 +1,37 @@
"""
List of regex compiled strings that can be used
"""

import re


def compile_re(reg: str) -> re.Pattern[str]:
    """
    compile a regex with the verbose flag

    Arguments:
        reg {str} -- regex string to compile

    Returns:
        re.Pattern[str] -- compiled pattern
    """
    return re.compile(reg, re.VERBOSE)


# email regex
EMAIL_BASIC_REGEX: str = r"""
^[A-Za-z0-9!#$%&'*+\-\/=?^_`{|}~][A-Za-z0-9!#$%:\(\)&'*+\-\/=?^_`{|}~\.]{0,63}
@(?!-)[A-Za-z0-9-]{1,63}(?<!-)(?:\.[A-Za-z0-9-]{1,63}(?<!-))*\.[a-zA-Z]{2,6}$
"""
# Domain regex with localhost
DOMAIN_WITH_LOCALHOST_REGEX: str = r"""
^(?:localhost|(?!-)[A-Za-z0-9-]{1,63}(?<!-)(?:\.[A-Za-z0-9-]{1,63}(?<!-))*\.[A-Za-z]{2,})$
"""
# domain regex with localhost and optional port
DOMAIN_WITH_LOCALHOST_PORT_REGEX: str = r"""
^(?:localhost|(?!-)[A-Za-z0-9-]{1,63}(?<!-)(?:\.[A-Za-z0-9-]{1,63}(?<!-))*\.[A-Za-z]{2,})(?::\d+)?$
"""
# Domain, no localhost
DOMAIN_REGEX: str = r"^(?!-)[A-Za-z0-9-]{1,63}(?<!-)(?:\.[A-Za-z0-9-]{1,63}(?<!-))*\.[A-Za-z]{2,}$"

# __END__
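
A small usage sketch (illustrative, not part of the diff): the multi-line constants are written for `re.VERBOSE`, so they should be compiled through `compile_re` rather than a plain `re.compile`:

```python
from corelibs.check_handling.regex_constants import compile_re, EMAIL_BASIC_REGEX

email_re = compile_re(EMAIL_BASIC_REGEX)
print(bool(email_re.search("user@example.com")))  # True
print(bool(email_re.search("not-an-email")))      # False
```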
src/corelibs/config_handling/settings_loader.py (new file)
@@ -0,0 +1,565 @@
"""
Load settings file for a certain group
Check data for existing and valid
Additional check for override settings as arguments
"""

import re
import configparser
from typing import Any, Tuple, Sequence, cast
from pathlib import Path
from corelibs.logging_handling.log import Log
from corelibs.iterator_handling.list_helpers import convert_to_list, is_list_in_list
from corelibs.var_handling.var_helpers import is_int, is_float, str_to_bool
from corelibs.config_handling.settings_loader_handling.settings_loader_check import SettingsLoaderCheck


class SettingsLoader:
    """
    Settings Loader with Argument parser
    """

    # split char
    DEFAULT_ELEMENT_SPLIT_CHAR: str = ','

    CONVERT_TO_LIST: list[str] = ['str', 'int', 'float', 'bool', 'auto']

    def __init__(
        self,
        args: dict[str, Any],
        config_file: Path,
        log: 'Log | None' = None,
        always_print: bool = False
    ) -> None:
        """
        init the Settings loader

        Args:
            args (dict): Script Arguments
            config_file (Path): config file including path
            log (Log | None): Log class, if set errors are written to this
            always_print (bool): Set to true to always print errors, even if Log is available

        Raises:
            ValueError: if the config folder cannot be found
        """
        self.args = args
        self.config_file = config_file
        self.log = log
        self.always_print = always_print
        # config parser, load config file first
        self.config_parser: configparser.ConfigParser | None = self.__load_config_file()
        # for check settings, abort flag
        self.__check_settings_abort: bool = False

    # MARK: load settings
    def load_settings(
        self,
        config_id: str,
        config_validate: dict[str, list[str]] | None = None,
        allow_not_exist: bool = False
    ) -> dict[str, str]:
        """
        neutral settings loader

        The settings values on the right side are seen as a list if they have "," inside (see ELEMENT SPLIT CHAR),
        but only if "check:list." is set

        For the allowed entries set, each set is "key => checks"; a check set is "check type:settings"
        key: the key name in the settings file
        check: check set with the following allowed entries on the left side for type
            - mandatory: must be set as "mandatory:yes"; if the key entry is missing or empty, throws an error
            - check: see __check_settings for the settings currently available
            - matching: a |-separated list of entries the value has to match too
            - in: the right side is another KEY value from the settings where this value must be inside
            - split: character to split entries; if set, check:list+ must be set if checks are needed
            - convert: convert to int, float -> if the element is a number convert, else leave as is
            - empty: convert empty to; if nothing is set on the right side then convert to None type

        TODO: there should be a config/options argument for general settings

        Args:
            config_id (str): what block to load
            config_validate (dict[str, list[str]]): list of allowed entries sets
            allow_not_exist (bool): If set to True, does not throw an error, but returns an empty set

        Returns:
            dict[str, str]: key = value list
        """
        # default set entries
        entry_set_empty: dict[str, str | None] = {}
        # entries that have to be split
        entry_split_char: dict[str, str] = {}
        # entries that should be converted
        entry_convert: dict[str, str] = {}
        # all the settings for the config id given
        settings: dict[str, dict[str, Any]] = {
            config_id: {},
        }
        if config_validate is None:
            config_validate = {}
        if self.config_parser is not None:
            try:
                # load all data as is, validation is done afterwards
                settings[config_id] = dict(self.config_parser[config_id])
            except KeyError as e:
                if allow_not_exist is True:
                    return {}
                raise ValueError(self.__print(
                    f"[!] Cannot read [{config_id}] block in the {self.config_file}: {e}",
                    'CRITICAL'
                )) from e
            try:
                for key, checks in config_validate.items():
                    skip = True
                    split_char = self.DEFAULT_ELEMENT_SPLIT_CHAR
                    # if one is set as list in check -> do not skip, but add to list
                    for check in checks:
                        if check.startswith("convert:"):
                            try:
                                [_, convert_to] = check.split(":")
                                if convert_to not in self.CONVERT_TO_LIST:
                                    raise ValueError(self.__print(
                                        f"[!] In [{config_id}] the convert type is invalid {check}: {convert_to}",
                                        'CRITICAL'
                                    ))
                                entry_convert[key] = convert_to
                            except ValueError as e:
                                raise ValueError(self.__print(
                                    f"[!] In [{config_id}] the convert type setup for entry failed: {check}: {e}",
                                    'CRITICAL'
                                )) from e
                        if check.startswith('empty:'):
                            try:
                                [_, empty_set] = check.split(":")
                                if not empty_set:
                                    empty_set = None
                                entry_set_empty[key] = empty_set
                            except ValueError as e:
                                print(f"VALUE ERROR: {key}")
                                raise ValueError(self.__print(
                                    f"[!] In [{config_id}] the empty set type for entry failed: {check}: {e}",
                                    'CRITICAL'
                                )) from e
                        # split char, also check to not set it twice, first one only
                        if check.startswith("split:") and not entry_split_char.get(key):
                            try:
                                [_, split_char] = check.split(":")
                                if len(split_char) == 0:
                                    self.__print(
                                        (
                                            f"[*] In [{config_id}] the [{key}] split char character is empty, "
                                            f"fallback to: {self.DEFAULT_ELEMENT_SPLIT_CHAR}"
                                        ),
                                        "WARNING"
                                    )
                                    split_char = self.DEFAULT_ELEMENT_SPLIT_CHAR
                                entry_split_char[key] = split_char
                                skip = False
                            except ValueError as e:
                                raise ValueError(self.__print(
                                    f"[!] In [{config_id}] the split character setup for entry failed: {check}: {e}",
                                    'CRITICAL'
                                )) from e
                    if skip:
                        continue
                    settings[config_id][key] = [
                        __value.replace(" ", "")
                        for __value in settings[config_id][key].split(split_char)
                    ]
            except KeyError as e:
                raise ValueError(self.__print(
                    f"[!] Cannot read [{config_id}] block because the entry [{e}] could not be found",
                    'CRITICAL'
                )) from e
        else:
            # ignore error if arguments are set
            if not self.__check_arguments(config_validate, True):
                raise ValueError(self.__print(f"[!] Cannot find file: {self.config_file}", 'CRITICAL'))
            else:
                # base set
                settings[config_id] = {}
        # make sure all are set
        # if we have arguments set, this overrides config settings
        error: bool = False
        for entry, validate in config_validate.items():
            # if we have a command line option set, this one overrides config
            if self.__get_arg(entry):
                self.__print(f"[*] Command line option override for: {entry}", 'WARNING')
                settings[config_id][entry] = self.args.get(entry)
            # validate checks
            for check in validate:
                # CHECKS
                # - mandatory
                # - check: regex check (see SettingsLoaderCheck class for entries)
                # - matching: entry in given list
                # - in: entry in other setting entry list
                # - length: for string length
                # - range: for int/float range check
                # mandatory check
                if check == "mandatory:yes" and (
                    not settings[config_id].get(entry) or settings[config_id].get(entry) == ['']
                ):
                    error = True
                    self.__print(f"[!] Missing content entry for: {entry}", 'ERROR')
                # skip if empty none
                if settings[config_id].get(entry) is None:
                    continue
                if check.startswith("check:"):
                    # replace the check and run normal checks
                    settings[config_id][entry] = self.__check_settings(
                        check, entry, settings[config_id][entry]
                    )
                    if self.__check_settings_abort is True:
                        error = True
                elif check.startswith("matching:"):
                    checks = check.replace("matching:", "").split("|")
                    if __result := is_list_in_list(convert_to_list(settings[config_id][entry]), list(checks)):
                        error = True
                        self.__print(f"[!] [{entry}] '{__result}' not matching {checks}", 'ERROR')
                elif check.startswith("in:"):
                    check = check.replace("in:", "")
                    # skip if check does not exist, and set error
                    if settings[config_id].get(check) is None:
                        error = True
                        self.__print(f"[!] [{entry}] '{check}' target does not exist", 'ERROR')
                        continue
                    # entry must be in check entry
                    # in for list, else equal with convert to string
                    if (
                        __result := is_list_in_list(
                            convert_to_list(settings[config_id][entry]),
                            __checks := convert_to_list(settings[config_id][check])
                        )
                    ):
                        self.__print(f"[!] [{entry}] '{__result}' must be in the '{__checks}' values list", 'ERROR')
                        error = True
                elif check.startswith('length:'):
                    check = check.replace("length:", "")
                    # length can be: n, n-, n-m, -m
                    # as: equal, >=, >= and <=, <=
                    self.__build_from_to_equal(entry, check)
                    if not self.__length_range_validate(
                        entry,
                        'length',
                        cast(list[str], convert_to_list(settings[config_id][entry])),
                        self.__build_from_to_equal(entry, check, convert_to_int=True)
                    ):
                        error = True
                elif check.startswith('range:'):
                    check = check.replace("range:", "")
                    if not self.__length_range_validate(
                        entry,
                        'range',
                        cast(list[str], convert_to_list(settings[config_id][entry])),
                        self.__build_from_to_equal(entry, check)
                    ):
                        error = True
                # after post clean up if we have empty entries and we are mandatory
                if check == "mandatory:yes" and (
                    not settings[config_id].get(entry) or settings[config_id].get(entry) == ['']
                ):
                    error = True
                    self.__print(f"[!] Missing content entry for: {entry}", 'ERROR')
        if error is True:
            raise ValueError(self.__print("[!] Missing or incorrect settings data. Cannot proceed", 'CRITICAL'))
        # set empty
        for [entry, empty_set] in entry_set_empty.items():
            # if set, skip, else set to empty value
            if settings[config_id].get(entry) or isinstance(settings[config_id].get(entry), list):
                continue
            settings[config_id][entry] = empty_set
        # Convert input
        for [entry, convert_type] in entry_convert.items():
            if convert_type in ["int", "any"] and is_int(settings[config_id][entry]):
                settings[config_id][entry] = int(settings[config_id][entry])
            elif convert_type in ["float", "any"] and is_float(settings[config_id][entry]):
                settings[config_id][entry] = float(settings[config_id][entry])
            elif convert_type in ["bool", "any"] and (
                settings[config_id][entry] == "true" or
                settings[config_id][entry] == "True" or
                settings[config_id][entry] == "false" or
                settings[config_id][entry] == "False"
            ):
                try:
                    settings[config_id][entry] = str_to_bool(settings[config_id][entry])
                except ValueError:
                    self.__print(
                        f"[!] Could not convert to boolean for '{entry}': {settings[config_id][entry]}",
                        'ERROR'
                    )
            # string is always string
            # TODO: empty and int/float/bool: set to none?

        return settings[config_id]

    # MARK: build from/to/equal logic
    def __build_from_to_equal(
        self, entry: str, check: str, convert_to_int: bool = False
    ) -> Tuple[float | None, float | None, float | None]:
        """
        split out the "n-m" part to get the to/from/equal

        Arguments:
            entry {str} -- entry key name, used for error messages
            check {str} -- the "n", "n-", "n-m" or "-m" check string

        Returns:
            Tuple[float | None, float | None, float | None] -- (from, to, equal)

        Throws:
            ValueError if range/length entries are not float
        """
        __from = None
        __to = None
        __equal = None
        try:
            [__from, __to] = check.split('-')
            if (__from and not is_float(__from)) or (__to and not is_float(__to)):
                raise ValueError(self.__print(
                    f"[{entry}] Check value for length is not in: {check}",
                    'CRITICAL'
                ))
            if len(__from) == 0:
                __from = None
            if len(__to) == 0:
                __to = None
        except ValueError as e:
            if not is_float(__equal := check):
                raise ValueError(self.__print(
                    f"[{entry}] Check value for length is not a valid integer: {check}",
                    'CRITICAL'
                )) from e
            if len(__equal) == 0:
                __equal = None
        # make sure this is all int or None
        if __from is not None:
            __from = int(__from) if convert_to_int else float(__from)
        if __to is not None:
            __to = int(__to) if convert_to_int else float(__to)
        if __equal is not None:
            __equal = int(__equal) if convert_to_int else float(__equal)
        return (
            __from,
            __to,
            __equal
        )

    # MARK: length/range validation
    def __length_range_validate(
        self,
        entry: str,
        check_type: str,
        values: Sequence[str | int | float],
        check: Tuple[float | None, float | None, float | None],
    ) -> bool:
        (__from, __to, __equal) = check
        valid = True
        for value_raw in convert_to_list(values):
            # skip not set values for range check
            if not value_raw:
                continue
            value = 0
            error_mark = ''
            if check_type == 'length':
                error_mark = 'length'
                value = len(str(value_raw))
            elif check_type == 'range':
                error_mark = 'range'
                value = float(str(value_raw))
            if __equal is not None and value != __equal:
                self.__print(f"[!] [{entry}] '{value_raw}' {error_mark} does not match {__equal}", 'ERROR')
                valid = False
                continue
            if __from is not None and __to is None and value < __from:
                self.__print(f"[!] [{entry}] '{value_raw}' {error_mark} smaller than minimum {__from}", 'ERROR')
                valid = False
                continue
            if __from is None and __to is not None and value > __to:
                self.__print(f"[!] [{entry}] '{value_raw}' {error_mark} larger than maximum {__to}", 'ERROR')
                valid = False
                continue
            if __from is not None and __to is not None and (
                value < __from or value > __to
            ):
                self.__print(
                    f"[!] [{entry}] '{value_raw}' {error_mark} outside valid range {__from} to {__to}",
                    'ERROR'
                )
                valid = False
                continue
        return valid

    # MARK: load config file data from file
    def __load_config_file(self) -> configparser.ConfigParser | None:
        """
        load and parse the config file
        if not loadable return None
        """
        # remove file name and get base path and check
        if not self.config_file.parent.is_dir():
            raise ValueError(f"Cannot find the config folder: {self.config_file.parent}")
        config = configparser.ConfigParser()
        if self.config_file.is_file():
            config.read(self.config_file)
            return config
        return None

    # MARK: regex clean up one
    def __clean_invalid_setting(
        self,
        entry: str,
        validate: str,
        value: str,
        regex: str,
        regex_clean: str | None,
        replace: str = "",
        print_error: bool = True,
    ) -> str:
        """
        check if a string is invalid, print optional error message and clean up string

        Args:
            entry (str): what entry key
            validate (str): validate type
            value (str): the value to check against
            regex (str): regex used for checking as r'...'
            regex_clean (str): regex used for cleaning as r'...'
            replace (str): replace with character. Defaults to ''
            print_error (bool): print the error message. Defaults to True
        """
        check = re.compile(regex, re.VERBOSE)
        clean: re.Pattern[str] | None = None
        if regex_clean is not None:
            clean = re.compile(regex_clean, re.VERBOSE)
        # value must be set if clean is None, else empty value is allowed and will fail
        if (clean is None and value or clean) and not check.search(value):
            self.__print(
                f"[!] Invalid content for '{entry}' with check '{validate}' and data: {value}",
                'ERROR', print_error
            )
            # clean up if clean up is not none, else return EMPTY string
            if clean is not None:
                return clean.sub(replace, value)
            self.__check_settings_abort = True
            return ''
        # else return as is
        return value

    # MARK: check settings, regex
    def __check_settings(
        self,
        check: str, entry: str, setting_value: list[str] | str
    ) -> list[str] | str:
        """
        check each setting valid
        The settings are defined in the SettingsLoaderCheck class

        Args:
            check (str): What check to run
            entry (str): Variable name, just for information message
            setting_value (list[str | int] | str | int): settings value data

        Returns:
            list[str | int] | str | int: cleaned up settings value data
        """
        check = check.replace("check:", "")
        # get the check settings
        __check_settings = SettingsLoaderCheck.CHECK_SETTINGS.get(check)
        if __check_settings is None:
            raise ValueError(self.__print(
                f"[{entry}] Cannot get SettingsLoaderCheck.CHECK_SETTINGS for {check}",
                'CRITICAL'
            ))
        # reset the abort check
        self.__check_settings_abort = False
        # either removes or replaces invalid characters in the list
        if isinstance(setting_value, list):
            # clean up invalid characters
            # loop over result and keep only filled (strip empty)
            setting_value = [e for e in [
                self.__clean_invalid_setting(
                    entry, check, str(__entry),
                    __check_settings['regex'], __check_settings['regex_clean'], __check_settings['replace']
                )
                for __entry in setting_value
            ] if e]
        else:
            setting_value = self.__clean_invalid_setting(
                entry, check, str(setting_value),
                __check_settings['regex'], __check_settings['regex_clean'], __check_settings['replace']
            )
        # else:
        #     self.__print("[!] Unknown type to check", 'ERROR')
        # return data
        return setting_value

    # MARK: check arguments, for config file load fail
    def __check_arguments(self, arguments: dict[str, list[str]], all_set: bool = False) -> bool:
        """
        check if at least one argument is set

        Args:
            arguments (dict[str, list[str]]): argument name to validate list mapping

        Returns:
            bool: True if arguments are set (all of them if all_set is True)
        """
        count_set = 0
        count_arguments = 0
        has_argument = False
        for argument, validate in arguments.items():
            # if argument is mandatory add to count, if not mandatory set "has" to skip error
            mandatory = any(entry == "mandatory:yes" for entry in validate)
            if not mandatory:
                has_argument = True
                continue
            count_arguments += 1
            if self.__get_arg(argument):
                has_argument = True
                count_set += 1
        # for all set, True only if all are set
        if all_set is True:
            has_argument = count_set == count_arguments

        return has_argument

    # MARK: get argument from args dict
    def __get_arg(self, entry: str) -> Any:
        """
        check if an argument entry exists, if None -> returns None else value of argument

        Arguments:
            entry {str} -- argument name

        Returns:
            Any -- argument value or None
        """
        if self.args.get(entry) is None:
            return None
        return self.args.get(entry)

    # MARK: error print
    def __print(self, msg: str, level: str, print_error: bool = True) -> str:
        """
        print out error, if Log class is set then print to log instead

        Arguments:
            msg {str} -- message to print or log
            level {str} -- log level name

        Keyword Arguments:
            print_error {bool} -- actually print the message (default: {True})
        """
        if self.log is not None:
            if not Log.validate_log_level(level):
                level = 'ERROR'
            self.log.logger.log(Log.get_log_level_int(level), msg, stacklevel=2)
        if self.log is None or self.always_print:
            if print_error:
                print(msg)
        return msg


# __END__
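
A usage sketch for the validation DSL described in `load_settings` (illustrative only; the `config/settings.ini` path, the `[database]` block and its keys are invented for the example):

```python
from pathlib import Path
from corelibs.config_handling.settings_loader import SettingsLoader

# args would normally come from an argument parser; empty here
loader = SettingsLoader(args={}, config_file=Path("config/settings.ini"))

# validate the [database] block: host is mandatory and must look like a domain,
# port must be an integer in 1-65535, tags is a comma separated list
db_settings = loader.load_settings(
    "database",
    config_validate={
        "host": ["mandatory:yes", "check:string.domain.with-localhost"],
        "port": ["mandatory:yes", "check:int", "range:1-65535", "convert:int"],
        "tags": ["split:,", "check:string.alphanumeric"],
    },
)
```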
src/corelibs/config_handling/settings_loader_handling/settings_loader_check.py (new file)
@@ -0,0 +1,81 @@
"""
Class of checks that can be run on value entries
"""

from typing import TypedDict
from corelibs.check_handling.regex_constants import (
    EMAIL_BASIC_REGEX, DOMAIN_WITH_LOCALHOST_REGEX, DOMAIN_WITH_LOCALHOST_PORT_REGEX, DOMAIN_REGEX
)


class SettingsLoaderCheckValue(TypedDict):
    """Settings check entries"""

    regex: str
    # if None, then on error we exit, else we clean up data
    regex_clean: str | None
    replace: str


class SettingsLoaderCheck:
    """
    check:<NAME> or check:list+<NAME>
    """

    CHECK_SETTINGS: dict[str, SettingsLoaderCheckValue] = {
        "int": {
            "regex": r"^[0-9]+$",
            "regex_clean": r"[^0-9]",
            "replace": "",
        },
        "string.alphanumeric": {
            "regex": r"^[a-zA-Z0-9]+$",
            "regex_clean": r"[^a-zA-Z0-9]",
            "replace": "",
        },
        "string.alphanumeric.lower.dash": {
            "regex": r"^[a-z0-9-]+$",
            "regex_clean": r"[^a-z0-9-]",
            "replace": "",
        },
        # A-Z a-z 0-9 _ - . ONLY
        # This one does not remove, but replaces with _
        "string.alphanumeric.extended.replace": {
            "regex": r"^[_.a-zA-Z0-9-]+$",
            "regex_clean": r"[^_.a-zA-Z0-9-]",
            "replace": "_",
        },
        # This does a basic email check, only alphanumeric with special characters
        "string.email.basic": {
            "regex": EMAIL_BASIC_REGEX,
            "regex_clean": None,
            "replace": "",
        },
        # Domain check, including localhost, no port
        "string.domain.with-localhost": {
            "regex": DOMAIN_WITH_LOCALHOST_REGEX,
            "regex_clean": None,
            "replace": "",
        },
        # Domain check, with localhost and port
        "string.domain.with-localhost.port": {
            "regex": DOMAIN_WITH_LOCALHOST_PORT_REGEX,
            "regex_clean": None,
            "replace": "",
        },
        # Domain check, no pure localhost allowed
        "string.domain": {
            "regex": DOMAIN_REGEX,
            "regex_clean": None,
            "replace": "",
        },
        # Basic date check, does not validate date itself
        "string.date": {
            "regex": r"^\d{4}[/-]\d{1,2}[/-]\d{1,2}$",
            "regex_clean": None,
            "replace": "",
        }
    }


# __END__
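
In short: entries with a `regex_clean` scrub invalid characters out of a failing value, while entries with `regex_clean = None` abort validation on mismatch. An illustrative sketch of the cleaning path (mirroring what `__clean_invalid_setting` does):

```python
import re
from corelibs.config_handling.settings_loader_handling.settings_loader_check import SettingsLoaderCheck

check = SettingsLoaderCheck.CHECK_SETTINGS["int"]
value = "12a34"
if not re.compile(check["regex"], re.VERBOSE).search(value) and check["regex_clean"] is not None:
    # "int" defines a regex_clean, so invalid characters are stripped instead of aborting
    value = re.compile(check["regex_clean"], re.VERBOSE).sub(check["replace"], value)
print(value)  # -> 1234
```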
src/corelibs/csv_handling/csv_interface.py (new file)
@@ -0,0 +1,155 @@
"""
Write to CSV file
- each class set is one file write with one header set
"""

from typing import Any, Sequence
from pathlib import Path
from collections import Counter
import csv
from corelibs.exceptions.csv_exceptions import (
    NoCsvReader, CompulsoryCsvHeaderCheckFailed, CsvHeaderDataMissing
)

DELIMITER = ","
QUOTECHAR = '"'
# type: _QuotingType
QUOTING = csv.QUOTE_MINIMAL


class CsvWriter:
    """
    write to a CSV file
    """

    def __init__(
        self,
        file_name: Path,
        header_mapping: dict[str, str],
        header_order: list[str] | None = None,
        delimiter: str = DELIMITER,
        quotechar: str = QUOTECHAR,
        quoting: Any = QUOTING,
    ):
        self.__file_name = file_name
        # Keys: indexes for the write line dict, Values: header entries
        self.header_mapping = header_mapping
        self.header: Sequence[str] = list(header_mapping.values())
        self.__delimiter = delimiter
        self.__quotechar = quotechar
        self.__quoting = quoting
        self.csv_file_writer = self.__open_csv(header_order)

    def __open_csv(self, header_order: list[str] | None) -> csv.DictWriter[str]:
        """
        open csv file for writing, write headers

        Note that if there is no header_order set we use the order in the header dictionary

        Arguments:
            header_order {list[str] | None} -- optional dedicated header order

        Returns:
            csv.DictWriter[str] -- writer with the header already written
        """
        # if header order is set, make sure all header value fields exist
        if not self.header:
            raise CsvHeaderDataMissing("No header data available to write CSV file")
        header_values = self.header
        if header_order is not None:
            if Counter(header_values) != Counter(header_order):
                raise CompulsoryCsvHeaderCheckFailed(
                    "header order does not match header values: "
                    f"{', '.join(header_values)} != {', '.join(header_order)}"
                )
            header_values = header_order
        # no duplicates
        if len(header_values) != len(set(header_values)):
            raise CompulsoryCsvHeaderCheckFailed(f"Header must have unique values only: {', '.join(header_values)}")
        try:
            fp = open(
                self.__file_name,
                "w", encoding="utf-8"
            )
            csv_file_writer = csv.DictWriter(
                fp,
                fieldnames=header_values,
                delimiter=self.__delimiter,
                quotechar=self.__quotechar,
                quoting=self.__quoting,
            )
            csv_file_writer.writeheader()
            return csv_file_writer
        except OSError as err:
            raise NoCsvReader(f"Could not open CSV file for writing: {err}") from err

    def write_csv(self, line: dict[str, str]) -> None:
        """
        write member csv line

        Arguments:
            line {dict[str, str]} -- row data keyed by header_mapping keys
        """
        csv_row: dict[str, Any] = {}
        # only write entries that are in the header list
        for key, value in self.header_mapping.items():
            csv_row[value] = line[key]
        self.csv_file_writer.writerow(csv_row)


class CsvReader:
    """
    read from a CSV file
    """

    def __init__(
        self,
        file_name: Path,
        header_check: Sequence[str] | None = None,
        delimiter: str = DELIMITER,
        quotechar: str = QUOTECHAR,
        quoting: Any = QUOTING,
    ):
        self.__file_name = file_name
        self.__header_check = header_check
        self.__delimiter = delimiter
        self.__quotechar = quotechar
        self.__quoting = quoting
        self.header: Sequence[str] | None = None
        self.csv_file_reader = self.__open_csv()

    def __open_csv(self) -> csv.DictReader[str]:
        """
        open csv file for reading

        Returns:
            csv.DictReader[str] -- reader with validated header
        """
        try:
            fp = open(
                self.__file_name,
                "r", encoding="utf-8"
            )
            csv_file_reader = csv.DictReader(
                fp,
                delimiter=self.__delimiter,
                quotechar=self.__quotechar,
                quoting=self.__quoting,
            )
            self.header = csv_file_reader.fieldnames
            if not self.header:
                raise CsvHeaderDataMissing("No header data available in CSV file")
            if self.__header_check is not None:
                header_diff = set(self.__header_check).difference(set(self.header or []))
                if header_diff:
                    raise CompulsoryCsvHeaderCheckFailed(
                        f"CSV header does not match expected header: {', '.join(header_diff)} missing"
                    )
            return csv_file_reader
        except OSError as err:
            raise NoCsvReader(f"Could not open CSV file for reading: {err}") from err


# __END__
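
A round-trip sketch (illustrative; file and column names are invented). `header_mapping` maps internal row-dict keys to the column names written to the file:

```python
from pathlib import Path
from corelibs.csv_handling.csv_interface import CsvWriter, CsvReader

# write two rows; dict keys are internal names, values are the CSV column headers
writer = CsvWriter(Path("out.csv"), header_mapping={"uid": "User ID", "name": "Name"})
writer.write_csv({"uid": "1", "name": "Alice"})
writer.write_csv({"uid": "2", "name": "Bob"})
# CsvWriter keeps its file handle open internally; drop the writer so the
# underlying file gets closed and flushed before reading it back
del writer

# read back, asserting the expected columns exist
reader = CsvReader(Path("out.csv"), header_check=["User ID", "Name"])
for row in reader.csv_file_reader:
    print(row["User ID"], row["Name"])
```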
(deleted file)
@@ -1,93 +0,0 @@
"""
Write to CSV file
- each class set is one file write with one header set
"""

from typing import Any
from pathlib import Path
from collections import Counter
import csv


class CsvWriter:
    """
    write to a CSV file
    """

    def __init__(
        self,
        path: Path,
        file_name: str,
        header: dict[str, str],
        header_order: list[str] | None = None
    ):
        self.path = path
        self.file_name = file_name
        # Keys: indexes for the write line dict, Values: header entries
        self.header = header
        self.csv_file_writer = self.__open_csv(header_order)

    def __open_csv(self, header_order: list[str] | None) -> 'csv.DictWriter[str] | None':
        """
        open csv file for writing, write headers

        Note that if there is no header_order set we use the order in the header dictionary

        Arguments:
            header_order {list[str] | None} -- optional dedicated header order

        Returns:
            csv.DictWriter[str] | None -- writer, or None if the header check or the open failed
        """
        # if header order is set, make sure all header value fields exist
        header_values = self.header.values()
        if header_order is not None:
            if Counter(header_values) != Counter(header_order):
                print(
                    "header order does not match header values: "
                    f"{', '.join(header_values)} != {', '.join(header_order)}"
                )
                return None
            header_values = header_order
        # no duplicates
        if len(header_values) != len(set(header_values)):
            print(f"Header must have unique values only: {', '.join(header_values)}")
            return None
        try:
            fp = open(
                self.path.joinpath(self.file_name),
                "w", encoding="utf-8"
            )
            csv_file_writer = csv.DictWriter(
                fp,
                fieldnames=header_values,
                delimiter=",",
                quotechar='"',
                quoting=csv.QUOTE_MINIMAL,
            )
            csv_file_writer.writeheader()
            return csv_file_writer
        except OSError as err:
            print("OS error:", err)
            return None

    def write_csv(self, line: dict[str, str]) -> bool:
        """
        write member csv line

        Arguments:
            line {dict[str, str]} -- row data keyed by header keys

        Returns:
            bool -- True if the line was written
        """
        if self.csv_file_writer is None:
            return False
        csv_row: dict[str, Any] = {}
        # only write entries that are in the header list
        for key, value in self.header.items():
            csv_row[value] = line[key]
        self.csv_file_writer.writerow(csv_row)
        return True


# __END__
src/corelibs/datetime_handling/__init__.py (new file, empty)
src/corelibs/datetime_handling/datetime_helpers.py (new file)
@@ -0,0 +1,439 @@
"""
Various string based date/time helpers
"""

import time as time_t
from datetime import datetime, time
from zoneinfo import ZoneInfo, ZoneInfoNotFoundError
from typing import Callable

DAYS_OF_WEEK_LONG_TO_SHORT: dict[str, str] = {
    'Monday': 'Mon',
    'Tuesday': 'Tue',
    'Wednesday': 'Wed',
    'Thursday': 'Thu',
    'Friday': 'Fri',
    'Saturday': 'Sat',
    'Sunday': 'Sun',
}
DAYS_OF_WEEK_ISO: dict[int, str] = {
    1: 'Mon', 2: 'Tue', 3: 'Wed', 4: 'Thu', 5: 'Fri', 6: 'Sat', 7: 'Sun'
}
DAYS_OF_WEEK_ISO_REVERSED: dict[str, int] = {value: key for key, value in DAYS_OF_WEEK_ISO.items()}


def create_time(timestamp: float, timestamp_format: str = "%Y-%m-%d %H:%M:%S") -> str:
    """
    just takes a timestamp and prints out a human readable format

    Arguments:
        timestamp {float} -- unix timestamp

    Keyword Arguments:
        timestamp_format {str} -- strftime format (default: {"%Y-%m-%d %H:%M:%S"})

    Returns:
        str -- formatted local time
    """
    return time_t.strftime(timestamp_format, time_t.localtime(timestamp))


def get_system_timezone():
    """Get system timezone using datetime's automatic detection"""
    # Get current time with system timezone
    local_time = datetime.now().astimezone()

    # Extract timezone info
    system_tz = local_time.tzinfo
    timezone_name = str(system_tz)

    return system_tz, timezone_name


def parse_timezone_data(timezone_tz: str = '') -> ZoneInfo:
    """
    parses a string to get the ZoneInfo
    If not set or not valid gets local time,
    if that is not possible get UTC

    Keyword Arguments:
        timezone_tz {str} -- time zone name (default: {''})

    Returns:
        ZoneInfo -- parsed time zone
    """
    try:
        return ZoneInfo(timezone_tz)
    except (ZoneInfoNotFoundError, ValueError, TypeError):
        # use default
        time_tz, time_tz_str = get_system_timezone()
        if time_tz is None:
            return ZoneInfo('UTC')
        # TODO build proper TZ lookup
        tz_mapping = {
            'JST': 'Asia/Tokyo',
            'KST': 'Asia/Seoul',
            'IST': 'Asia/Kolkata',
            'CST': 'Asia/Shanghai',  # Default to China for CST
            'AEST': 'Australia/Sydney',
            'AWST': 'Australia/Perth',
            'EST': 'America/New_York',
            'EDT': 'America/New_York',
            'CDT': 'America/Chicago',
            'MST': 'America/Denver',
            'MDT': 'America/Denver',
            'PST': 'America/Los_Angeles',
            'PDT': 'America/Los_Angeles',
            'GMT': 'UTC',
            'UTC': 'UTC',
            'CET': 'Europe/Berlin',
            'CEST': 'Europe/Berlin',
            'BST': 'Europe/London',
        }
        try:
            return ZoneInfo(tz_mapping[time_tz_str])
        except (ZoneInfoNotFoundError, KeyError) as e:
            raise ValueError(f"No mapping for {time_tz_str}: {e}") from e


def get_datetime_iso8601(timezone_tz: str | ZoneInfo = '', sep: str = 'T', timespec: str = 'microseconds') -> str:
    """
    get a datetime in the iso8601 format with microseconds

    Returns:
        str -- ISO 8601 formatted current time
    """
    # parse if this is a string
    if isinstance(timezone_tz, str):
        timezone_tz = parse_timezone_data(timezone_tz)
    return datetime.now(timezone_tz).isoformat(sep=sep, timespec=timespec)


def validate_date(date: str, not_before: datetime | None = None, not_after: datetime | None = None) -> bool:
    """
    check if Y-m-d or Y/m/d are parsable and valid

    Arguments:
        date {str} -- date string to validate

    Returns:
        bool -- True if parsable and inside the optional bounds
    """
    formats = ['%Y-%m-%d', '%Y/%m/%d']
    for __format in formats:
        try:
            __date = datetime.strptime(date, __format).date()
            if not_before is not None and __date < not_before.date():
                return False
            if not_after is not None and __date > not_after.date():
                return False
            return True
        except ValueError:
            continue
    return False


def parse_flexible_date(
    date_str: str,
    timezone_tz: str | ZoneInfo | None = None,
    shift_time_zone: bool = True
) -> datetime | None:
    """
    Parse date string in multiple formats
    will add time zone info if not None
    by default it will change the TZ and time to the new time zone
    if no TZ info is set in date_str, then localtime is assumed

    Arguments:
        date_str {str} -- date string to parse

    Keyword Arguments:
        timezone_tz {str | ZoneInfo | None} -- target time zone (default: {None})
        shift_time_zone {bool} -- shift the time instead of just tagging the zone (default: {True})

    Returns:
        datetime | None -- parsed datetime, or None if no format matched
    """

    date_str = date_str.strip()

    # Try different parsing methods
    parsers: list[Callable[[str], datetime]] = [
        # ISO 8601 format, also with missing "T"
        lambda x: datetime.fromisoformat(x),  # pylint: disable=W0108
        lambda x: datetime.fromisoformat(x.replace(' ', 'T')),  # pylint: disable=W0108
        # Simple date format
        lambda x: datetime.strptime(x, "%Y-%m-%d"),
        # datetime without T
        lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S"),
        lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S.%f"),
        # Alternative ISO formats (fallback)
        lambda x: datetime.strptime(x, "%Y-%m-%dT%H:%M:%S"),
        lambda x: datetime.strptime(x, "%Y-%m-%dT%H:%M:%S.%f"),
    ]

    if timezone_tz is not None:
        if isinstance(timezone_tz, str):
            timezone_tz = parse_timezone_data(timezone_tz)

    date_new = None
    for parser in parsers:
        try:
            date_new = parser(date_str)
            break
        except ValueError:
            continue

    if date_new is not None:
        if timezone_tz is not None:
            # shift time zone (default), this will change the date
            # if the date has no +HH:MM it will take the local time zone as base
            if shift_time_zone:
                return date_new.astimezone(timezone_tz)
            # just add the time zone
            return date_new.replace(tzinfo=timezone_tz)
        return date_new

    return None
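

# Example (illustrative):
#   parse_flexible_date("2025-08-06 16:17:39", "Asia/Tokyo")
#   -> naive input is interpreted as local time and shifted to JST
#   parse_flexible_date("2025-08-06T16:17:39+00:00", "Asia/Tokyo")
#   -> 2025-08-07 01:17:39+09:00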

def compare_dates(date1_str: str, date2_str: str) -> None | bool:
    """
    compare two dates, if the first one is newer than the second one return True
    If the dates are equal then False will be returned
    on error return None

    Arguments:
        date1_str {str} -- first date string
        date2_str {str} -- second date string

    Returns:
        None | bool -- True if date1 is newer, False otherwise, None on parse error
    """

    try:
        # Parse both dates
        date1 = parse_flexible_date(date1_str)
        date2 = parse_flexible_date(date2_str)

        # Check if parsing was successful
        if date1 is None or date2 is None:
            return None

        # Compare dates
        return date1.date() > date2.date()

    except ValueError:
        return None


def find_newest_datetime_in_list(date_list: list[str]) -> None | str:
    """
    Find the newest date from a list of ISO 8601 formatted date strings.
    Handles potential parsing errors gracefully.

    Args:
        date_list (list): List of date strings in format '2025-08-06T16:17:39.747+09:00'

    Returns:
        str: The date string with the newest/latest date, or None if the list is empty or all dates are invalid
    """
    if not date_list:
        return None

    valid_dates: list[tuple[str, datetime]] = []

    for date_str in date_list:
        try:
            # Parse the date string and store both original string and parsed datetime
            parsed_date = parse_flexible_date(date_str)
            if parsed_date is None:
                continue
            valid_dates.append((date_str, parsed_date))
        except ValueError:
            # Skip invalid date strings
            continue

    if not valid_dates:
        return None

    # Find the date string with the maximum datetime value
    newest_date_str: str = max(valid_dates, key=lambda x: x[1])[0]

    return newest_date_str
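

# Example (illustrative):
#   find_newest_datetime_in_list(["2025-01-01", "2025-08-06 16:17:39", "bad"])
#   -> "2025-08-06 16:17:39" (unparsable entries are skipped)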

def parse_day_of_week_range(dow_days: str) -> list[tuple[int, str]]:
    """
    Parse a day of week list/range string and return a list of tuples with day index and name.
    Allowed are short (eg Mon) or long names (eg Monday).

    Arguments:
        dow_days {str} -- A comma-separated list of days or ranges (e.g., "Mon,Wed-Fri")

    Raises:
        ValueError: If the input format is invalid or if duplicate days are found.

    Returns:
        list[tuple[int, str]] -- A list of tuples containing the day index and name.
    """
    # Mon is 1 and Sun is 7, which is ISO standard
    dow_day = dow_days.split(",")
    dow_day = [day.strip() for day in dow_day if day.strip()]
    __out_dow_days: list[tuple[int, str]] = []
    for __dow_day in dow_day:
        # if we have a "-" in there fill the range
        if "-" in __dow_day:
            __dow_range = __dow_day.split("-")
            __dow_range = [day.strip().capitalize() for day in __dow_range if day.strip()]
            try:
                start_day = DAYS_OF_WEEK_ISO_REVERSED[__dow_range[0]]
                end_day = DAYS_OF_WEEK_ISO_REVERSED[__dow_range[1]]
            except KeyError:
                # try long names
                try:
                    start_day = DAYS_OF_WEEK_ISO_REVERSED[DAYS_OF_WEEK_LONG_TO_SHORT[__dow_range[0]]]
                    end_day = DAYS_OF_WEEK_ISO_REVERSED[DAYS_OF_WEEK_LONG_TO_SHORT[__dow_range[1]]]
                except KeyError as e:
                    raise ValueError(f"Invalid day of week entry found: {__dow_day}: {e}") from e
            # Check if this spans across the weekend (e.g., Fri-Mon)
            if start_day > end_day:
                # Handle weekend-spanning range: start_day to 7, then 1 to end_day
                __out_dow_days.extend(
                    [
                        (i, DAYS_OF_WEEK_ISO[i])
                        for i in range(start_day, 8)  # start_day to Sunday (7)
                    ]
                )
                __out_dow_days.extend(
                    [
                        (i, DAYS_OF_WEEK_ISO[i])
                        for i in range(1, end_day + 1)  # Monday (1) to end_day
                    ]
                )
            else:
                # Normal range: start_day to end_day
                __out_dow_days.extend(
                    [
                        (i, DAYS_OF_WEEK_ISO[i])
                        for i in range(start_day, end_day + 1)
                    ]
                )
        else:
            try:
                __out_dow_days.append((DAYS_OF_WEEK_ISO_REVERSED[__dow_day], __dow_day))
            except KeyError as e:
                raise ValueError(f"Invalid day of week entry found: {__dow_day}: {e}") from e
    # if there are duplicates, alert
    if len(__out_dow_days) != len(set(__out_dow_days)):
        raise ValueError(f"Duplicate day of week entries found: {__out_dow_days}")

    return __out_dow_days
|
||||
|
||||
|
||||
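# Usage sketch (assuming DAYS_OF_WEEK_ISO maps 1..7 to the short names Mon..Sun):
#   parse_day_of_week_range("Mon,Wed-Fri")
#   # -> [(1, 'Mon'), (3, 'Wed'), (4, 'Thu'), (5, 'Fri')]
#   parse_day_of_week_range("Fri-Mon")
#   # -> [(5, 'Fri'), (6, 'Sat'), (7, 'Sun'), (1, 'Mon')], weekend-spanning range
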
def parse_time_range(time_str: str, time_format: str = "%H:%M") -> tuple[time, time]:
    """
    Parse a time range string in the format "HH:MM-HH:MM" and return a tuple of two time objects.

    Arguments:
        time_str {str} -- The time range string to parse.

    Raises:
        ValueError: Invalid time block set
        ValueError: Invalid time format
        ValueError: Start time must be before end time

    Returns:
        tuple[time, time] -- start time, end time (formatted with leading zeros)
    """
    __time_str = time_str.strip()
    # split by "-"
    __time_split = __time_str.split("-")
    if len(__time_split) != 2:
        raise ValueError(f"Invalid time block: {__time_str}")
    try:
        __time_start = datetime.strptime(__time_split[0], time_format).time()
        __time_end = datetime.strptime(__time_split[1], time_format).time()
    except ValueError as e:
        raise ValueError(f"Invalid time block format [{__time_str}]: {e}") from e
    if __time_start >= __time_end:
        raise ValueError(f"Invalid time block set, start time after end time or equal: {__time_str}")

    return __time_start, __time_end

def times_overlap_or_connect(time1: tuple[time, time], time2: tuple[time, time], allow_touching: bool = False) -> bool:
    """
    Check if two time ranges overlap or connect

    Args:
        time1 (tuple): (start_time, end_time) for first range
        time2 (tuple): (start_time, end_time) for second range
        allow_touching (bool): If True, touching ranges (e.g., 8:00-10:00 and 10:00-12:00) are allowed

    Returns:
        bool: True if ranges overlap or connect (based on allow_touching)
    """
    start1, end1 = time1
    start2, end2 = time2

    if allow_touching:
        # Only check for actual overlap (touching is OK)
        return start1 < end2 and start2 < end1
    # Check for overlap OR touching
    return start1 <= end2 and start2 <= end1

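# Usage sketch: touching ranges are only rejected when allow_touching is False
#   times_overlap_or_connect((time(8, 0), time(10, 0)), (time(10, 0), time(12, 0)))
#   # -> True, the ranges connect at 10:00
#   times_overlap_or_connect((time(8, 0), time(10, 0)), (time(10, 0), time(12, 0)), allow_touching=True)
#   # -> False, touching is fine, there is no real overlap
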
def is_time_in_range(current_time: str, start_time: str, end_time: str) -> bool:
    """
    Check if current_time is within start_time and end_time (inclusive)
    Time format: "HH:MM:SS" (24-hour format)

    Arguments:
        current_time {str} -- time to check
        start_time {str} -- range start time
        end_time {str} -- range end time

    Returns:
        bool -- True if current_time falls inside the range
    """
    # Convert string times to time objects
    current = datetime.strptime(current_time, "%H:%M:%S").time()
    start = datetime.strptime(start_time, "%H:%M:%S").time()
    end = datetime.strptime(end_time, "%H:%M:%S").time()

    # Handle case where range crosses midnight (e.g., 22:00 to 06:00)
    if start <= end:
        # Normal case: start time is before end time
        return start <= current <= end
    # Crosses midnight: e.g., 22:00 to 06:00
    return current >= start or current <= end

def reorder_weekdays_from_today(base_day: str) -> dict[int, str]:
    """
    Reorder the days of the week starting from the specified base_day.

    Arguments:
        base_day {str} -- The day to start the week from (e.g., "Mon").

    Returns:
        dict[int, str] -- A dictionary mapping day numbers to day names.
    """
    try:
        today_num = DAYS_OF_WEEK_ISO_REVERSED[base_day]
    except KeyError:
        try:
            today_num = DAYS_OF_WEEK_ISO_REVERSED[DAYS_OF_WEEK_LONG_TO_SHORT[base_day]]
        except KeyError as e:
            raise ValueError(f"Invalid day name provided: {base_day}: {e}") from e
    # Convert to list of tuples
    items = list(DAYS_OF_WEEK_ISO.items())
    # Reorder: from today onwards + from beginning to yesterday
    reordered_items = items[today_num - 1:] + items[:today_num - 1]

    # Convert back to dictionary
    return dict(reordered_items)

# __END__

200 src/corelibs/datetime_handling/timestamp_convert.py Normal file
@@ -0,0 +1,200 @@
"""
|
||||
Convert timestamp strings with time units into seconds and vice versa.
|
||||
"""
|
||||
|
||||
from math import floor
|
||||
import re
|
||||
from corelibs.var_handling.var_helpers import is_float
|
||||
|
||||
|
||||
class TimeParseError(Exception):
|
||||
"""Custom exception for time parsing errors."""
|
||||
|
||||
|
||||
class TimeUnitError(Exception):
|
||||
"""Custom exception for time parsing errors."""
|
||||
|
||||
|
||||
def convert_to_seconds(time_string: str | int | float) -> int:
    """
    Convert a string with time units into seconds
    The following units are allowed
    Y: 365 days
    M: 30 days
    d, h, m, s

    Arguments:
        time_string {str} -- time string like "1d 2h", or a plain number

    Raises:
        TimeParseError: if a unit appears more than once
        TimeUnitError: if a unit is not a valid unit name

    Returns:
        int -- time in seconds
    """

    # skip out if this is a number of any type
    # numbers will be made float, rounded and then converted to int
    if is_float(time_string):
        return int(round(float(time_string)))
    time_string = str(time_string)

    # Check if the time string is negative
    negative = time_string.startswith('-')
    if negative:
        time_string = time_string[1:]  # Remove the negative sign for processing

    # Define time unit conversion factors
    unit_factors: dict[str, int] = {
        'Y': 31536000,  # 365 days * 86400 seconds/day
        'M': 2592000,   # 30 days * 86400 seconds/day
        'd': 86400,     # 1 day in seconds
        'h': 3600,      # 1 hour in seconds
        'm': 60,        # minutes to seconds
        's': 1          # 1 second in seconds
    }
    long_unit_names: dict[str, str] = {
        'year': 'Y',
        'years': 'Y',
        'month': 'M',
        'months': 'M',
        'day': 'd',
        'days': 'd',
        'hour': 'h',
        'hours': 'h',
        'minute': 'm',
        'minutes': 'm',
        'min': 'm',
        'second': 's',
        'seconds': 's',
        'sec': 's',
    }

    total_seconds = 0

    seen_units: list[str] = []  # Track units that have been encountered

    # Use regex to match number and time unit pairs
    for match in re.finditer(r'(\d+)\s*([a-zA-Z]+)', time_string):
        value, unit = int(match.group(1)), match.group(2)

        # full name check, fallback to original name
        unit = long_unit_names.get(unit.lower(), unit)

        # Check for duplicate units
        if unit in seen_units:
            raise TimeParseError(f"Unit '{unit}' appears more than once.")
        # Check invalid unit
        if unit not in unit_factors:
            raise TimeUnitError(f"Unit '{unit}' is not a valid unit name.")
        # Add to total seconds based on the unit
        total_seconds += value * unit_factors[unit]

        seen_units.append(unit)

    return -total_seconds if negative else total_seconds

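# Usage sketch:
#   convert_to_seconds("1h 30m")   # -> 5400
#   convert_to_seconds("2 days")   # -> 172800, long unit names are mapped
#   convert_to_seconds("-45s")     # -> -45
#   convert_to_seconds(90.7)       # -> 91, plain numbers are rounded to int
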
def seconds_to_string(seconds: str | int | float, show_microseconds: bool = False) -> str:
    """
    Convert seconds to compact human readable format (e.g., "1d 2h 3m 4.567s")
    Zero values are omitted.
    Milliseconds, if requested, are added as fractional part of seconds.
    Supports negative values with "-" prefix
    If not int or float, the value will be returned as is

    Args:
        seconds (float): Time in seconds (can be negative)
        show_microseconds (bool): Whether to show microseconds precision

    Returns:
        str: Compact human readable time format
    """
    # if not int or float, return as is
    if not isinstance(seconds, (int, float)):
        return seconds
    # Handle negative values
    negative = seconds < 0
    seconds = abs(seconds)

    whole_seconds = int(seconds)
    fractional = seconds - whole_seconds

    days = whole_seconds // 86400
    hours = (whole_seconds % 86400) // 3600
    minutes = (whole_seconds % 3600) // 60
    secs = whole_seconds % 60

    parts: list[str] = []
    if days > 0:
        parts.append(f"{days}d")
    if hours > 0:
        parts.append(f"{hours}h")
    if minutes > 0:
        parts.append(f"{minutes}m")

    # Handle seconds with fractional part
    if fractional > 0:
        total_seconds = secs + fractional
        # microsecond (6 digit) or millisecond (3 digit) precision
        precision = 6 if show_microseconds else 3
        formatted = f"{total_seconds:.{precision}f}".rstrip('0').rstrip('.')
        parts.append(f"{formatted}s")
    elif secs > 0 or not parts:
        parts.append(f"{secs}s")

    result = " ".join(parts)
    return f"-{result}" if negative else result

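# Usage sketch:
#   seconds_to_string(93784)                            # -> "1d 2h 3m 4s"
#   seconds_to_string(4.5678, show_microseconds=True)   # -> "4.5678s"
#   seconds_to_string(-60)                              # -> "-1m"
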
def convert_timestamp(timestamp: float | int | str, show_microseconds: bool = True) -> str:
    """
    Format a timestamp into human readable format. This function will add 0 values between set values,
    for example 1d 1s would output as 1d 0h 0m 1s
    Milliseconds will be shown if requested, appended with "ms" at the end
    Negative values will be prefixed with "-"
    If not int or float, the value will be returned as is

    Arguments:
        timestamp {float} -- time in seconds

    Keyword Arguments:
        show_microseconds {bool} -- append the milliseconds part (default: {True})

    Returns:
        str -- human readable time string
    """
    if not isinstance(timestamp, (int, float)):
        return timestamp
    negative = timestamp < 0
    timestamp = abs(timestamp)
    # split off the milliseconds, rounded from the fractional part
    ms = int(round((timestamp - int(timestamp)) * 1000))
    timestamp = int(timestamp)
    if ms >= 1000:
        timestamp += 1
        ms = 0
    timegroups = (86400, 3600, 60, 1)
    output: list[int] = []
    for i in timegroups:
        output.append(int(floor(timestamp / i)))
        timestamp = timestamp % i
    # output has days|hours|min|sec
    time_string = ""
    if output[0]:
        time_string = f"{output[0]}d "
    if output[0] or output[1]:
        time_string += f"{output[1]}h "
    if output[0] or output[1] or output[2]:
        time_string += f"{output[2]}m "
    time_string += f"{output[3]}s"
    if show_microseconds:
        time_string += f" {ms}ms" if ms else " 0ms"
    return f"-{time_string}" if negative else time_string

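# Usage sketch:
#   convert_timestamp(90061.5)                       # -> "1d 1h 1m 1s 500ms"
#   convert_timestamp(61, show_microseconds=False)   # -> "1m 1s"
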
# __END__
32 src/corelibs/datetime_handling/timestamp_strings.py Normal file
@@ -0,0 +1,32 @@
"""
|
||||
Current timestamp strings and time zones
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from zoneinfo import ZoneInfo, ZoneInfoNotFoundError
|
||||
|
||||
|
||||
class TimestampStrings:
|
||||
"""
|
||||
set default time stamps
|
||||
"""
|
||||
|
||||
TIME_ZONE: str = 'Asia/Tokyo'
|
||||
|
||||
def __init__(self, time_zone: str | ZoneInfo | None = None):
|
||||
self.timestamp_now = datetime.now()
|
||||
# set time zone as string
|
||||
time_zone = time_zone if time_zone is not None else self.TIME_ZONE
|
||||
self.time_zone = str(time_zone) if not isinstance(time_zone, str) else time_zone
|
||||
# set ZoneInfo type
|
||||
try:
|
||||
self.time_zone_zi = ZoneInfo(self.time_zone)
|
||||
except ZoneInfoNotFoundError as e:
|
||||
raise ValueError(f'Zone could not be loaded [{self.time_zone}]: {e}') from e
|
||||
self.timestamp_now_tz = datetime.now(self.time_zone_zi)
|
||||
self.today = self.timestamp_now.strftime('%Y-%m-%d')
|
||||
self.timestamp = self.timestamp_now.strftime("%Y-%m-%d %H:%M:%S")
|
||||
self.timestamp_tz = self.timestamp_now_tz.strftime("%Y-%m-%d %H:%M:%S %Z")
|
||||
self.timestamp_file = self.timestamp_now.strftime("%Y-%m-%d_%H%M%S")
|
||||
|
||||
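# Usage sketch:
#   ts = TimestampStrings("UTC")
#   ts.today            # e.g. "2025-08-06"
#   ts.timestamp_tz     # e.g. "2025-08-06 07:17:39 UTC"
#   ts.timestamp_file   # e.g. "2025-08-06_161739", safe for file names
#   TimestampStrings("Not/AZone")  # raises ValueError
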
# __END__
0 src/corelibs/db_handling/__init__.py Normal file
214 src/corelibs/db_handling/sqlite_io.py Normal file
@@ -0,0 +1,214 @@
"""
|
||||
SQLite DB::IO
|
||||
Will be moved to the CoreLibs
|
||||
also method names are subject to change
|
||||
"""
|
||||
|
||||
# import gc
|
||||
from pathlib import Path
|
||||
from typing import Any, Literal, TYPE_CHECKING
|
||||
import sqlite3
|
||||
from corelibs.debug_handling.debug_helpers import call_stack
|
||||
if TYPE_CHECKING:
|
||||
from corelibs.logging_handling.log import Logger
|
||||
|
||||
|
||||
class SQLiteIO():
|
||||
"""Mini SQLite interface"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
log: 'Logger',
|
||||
db_name: str | Path,
|
||||
autocommit: bool = False,
|
||||
enable_fkey: bool = True,
|
||||
row_factory: str | None = None
|
||||
):
|
||||
self.log = log
|
||||
self.db_name = db_name
|
||||
self.autocommit = autocommit
|
||||
self.enable_fkey = enable_fkey
|
||||
self.row_factory = row_factory
|
||||
self.conn: sqlite3.Connection | None = self.db_connect()
|
||||
|
||||
# def __del__(self):
|
||||
# self.db_close()
|
||||
|
||||
def db_connect(self) -> sqlite3.Connection | None:
|
||||
"""
|
||||
Connect to SQLite database, create if it doesn't exist
|
||||
"""
|
||||
try:
|
||||
# Connect to database (creates if doesn't exist)
|
||||
self.conn = sqlite3.connect(self.db_name, autocommit=self.autocommit)
|
||||
self.conn.setconfig(sqlite3.SQLITE_DBCONFIG_ENABLE_FKEY, True)
|
||||
# self.conn.execute("PRAGMA journal_mode=WAL")
|
||||
# self.log.debug(f"Connected to database: {self.db_name}")
|
||||
|
||||
def dict_factory(cursor: sqlite3.Cursor, row: list[Any]):
|
||||
fields = [column[0] for column in cursor.description]
|
||||
return dict(zip(fields, row))
|
||||
|
||||
match self.row_factory:
|
||||
case 'Row':
|
||||
self.conn.row_factory = sqlite3.Row
|
||||
case 'Dict':
|
||||
self.conn.row_factory = dict_factory
|
||||
case _:
|
||||
self.conn.row_factory = None
|
||||
|
||||
return self.conn
|
||||
except (sqlite3.Error, sqlite3.OperationalError) as e:
|
||||
self.log.error(f"Error connecting to database [{type(e).__name__}] [{self.db_name}]: {e} [{call_stack()}]")
|
||||
self.log.error(f"Error code: {e.sqlite_errorcode if hasattr(e, 'sqlite_errorcode') else 'N/A'}")
|
||||
self.log.error(f"Error name: {e.sqlite_errorname if hasattr(e, 'sqlite_errorname') else 'N/A'}")
|
||||
return None
|
||||
|
||||
def db_close(self):
|
||||
"""close connection"""
|
||||
if self.conn is not None:
|
||||
self.conn.close()
|
||||
self.conn = None
|
||||
|
||||
def db_connected(self) -> bool:
|
||||
"""
|
||||
Return True if db connection is not none
|
||||
|
||||
Returns:
|
||||
bool -- _description_
|
||||
"""
|
||||
return True if self.conn else False
|
||||
|
||||
def __content_exists(self, content_name: str, sql_type: str) -> bool:
|
||||
"""
|
||||
Check if some content name for a certain type exists
|
||||
|
||||
Arguments:
|
||||
content_name {str} -- _description_
|
||||
sql_type {str} -- _description_
|
||||
|
||||
Returns:
|
||||
bool -- _description_
|
||||
"""
|
||||
if self.conn is None:
|
||||
return False
|
||||
try:
|
||||
cursor = self.conn.cursor()
|
||||
cursor.execute("""
|
||||
SELECT name
|
||||
FROM sqlite_master
|
||||
WHERE type = ? AND name = ?
|
||||
""", (sql_type, content_name,))
|
||||
return cursor.fetchone() is not None
|
||||
except sqlite3.Error as e:
|
||||
self.log.error(f"Error checking table [{content_name}/{sql_type}] existence: {e} [{call_stack()}]")
|
||||
return False
|
||||
|
||||
def table_exists(self, table_name: str) -> bool:
|
||||
"""
|
||||
Check if a table exists in the database
|
||||
"""
|
||||
return self.__content_exists(table_name, 'table')
|
||||
|
||||
def trigger_exists(self, trigger_name: str) -> bool:
|
||||
"""
|
||||
Check if a triggere exits
|
||||
"""
|
||||
return self.__content_exists(trigger_name, 'trigger')
|
||||
|
||||
def index_exists(self, index_name: str) -> bool:
|
||||
"""
|
||||
Check if a triggere exits
|
||||
"""
|
||||
return self.__content_exists(index_name, 'index')
|
||||
|
||||
def meta_data_detail(self, table_name: str) -> list[tuple[Any, ...]] | list[dict[str, Any]] | Literal[False]:
|
||||
"""table detail"""
|
||||
query_show_table = """
|
||||
SELECT
|
||||
ti.cid, ti.name, ti.type, ti.'notnull', ti.dflt_value, ti.pk,
|
||||
il_ii.idx_name, il_ii.idx_unique, il_ii.idx_origin, il_ii.idx_partial
|
||||
FROM
|
||||
sqlite_schema AS m,
|
||||
pragma_table_info(m.name) AS ti
|
||||
LEFT JOIN (
|
||||
SELECT
|
||||
il.name AS idx_name, il.'unique' AS idx_unique, il.origin AS idx_origin, il.partial AS idx_partial,
|
||||
ii.cid AS tbl_cid
|
||||
FROM
|
||||
sqlite_schema AS m,
|
||||
pragma_index_list(m.name) AS il,
|
||||
pragma_index_info(il.name) AS ii
|
||||
WHERE m.name = ?1
|
||||
) AS il_ii ON (ti.cid = il_ii.tbl_cid)
|
||||
WHERE
|
||||
m.name = ?1
|
||||
"""
|
||||
return self.execute_query(query_show_table, (table_name,))
|
||||
|
||||
def execute_cursor(
|
||||
self, query: str, params: tuple[Any, ...] | None = None
|
||||
) -> sqlite3.Cursor | Literal[False]:
|
||||
"""execute a cursor, used in execute query or return one and for fetch_row"""
|
||||
if self.conn is None:
|
||||
self.log.warning(f"No connection [{call_stack()}]")
|
||||
return False
|
||||
try:
|
||||
cursor = self.conn.cursor()
|
||||
if params:
|
||||
cursor.execute(query, params)
|
||||
else:
|
||||
cursor.execute(query)
|
||||
return cursor
|
||||
except sqlite3.Error as e:
|
||||
self.log.error(f"Error during executing cursor [{query}:{params}]: {e} [{call_stack()}]")
|
||||
return False
|
||||
|
||||
def execute_query(
|
||||
self, query: str, params: tuple[Any, ...] | None = None
|
||||
) -> list[tuple[Any, ...]] | list[dict[str, Any]] | Literal[False]:
|
||||
"""query execute with or without params, returns result"""
|
||||
if self.conn is None:
|
||||
self.log.warning(f"No connection [{call_stack()}]")
|
||||
return False
|
||||
try:
|
||||
if (cursor := self.execute_cursor(query, params)) is False:
|
||||
return False
|
||||
# fetch before commit because we need to get the RETURN before
|
||||
result = cursor.fetchall()
|
||||
# this is for INSERT/UPDATE/CREATE only
|
||||
self.conn.commit()
|
||||
return result
|
||||
except sqlite3.Error as e:
|
||||
self.log.error(f"Error during executing query [{query}:{params}]: {e} [{call_stack()}]")
|
||||
return False
|
||||
|
||||
def return_one(
|
||||
self, query: str, params: tuple[Any, ...] | None = None
|
||||
) -> tuple[Any, ...] | dict[str, Any] | Literal[False] | None:
|
||||
"""return one row, only for SELECT"""
|
||||
if self.conn is None:
|
||||
self.log.warning(f"No connection [{call_stack()}]")
|
||||
return False
|
||||
try:
|
||||
if (cursor := self.execute_cursor(query, params)) is False:
|
||||
return False
|
||||
return cursor.fetchone()
|
||||
except sqlite3.Error as e:
|
||||
self.log.error(f"Error during return one: {e} [{call_stack()}]")
|
||||
return False
|
||||
|
||||
def fetch_row(
|
||||
self, cursor: sqlite3.Cursor | Literal[False]
|
||||
) -> tuple[Any, ...] | dict[str, Any] | Literal[False] | None:
|
||||
"""read from cursor"""
|
||||
if self.conn is None or cursor is False:
|
||||
self.log.warning(f"No connection [{call_stack()}]")
|
||||
return False
|
||||
try:
|
||||
return cursor.fetchone()
|
||||
except sqlite3.Error as e:
|
||||
self.log.error(f"Error during fetch row: {e} [{call_stack()}]")
|
||||
return False
|
||||
|
||||
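# Usage sketch (assuming a Logger instance named log):
#   db = SQLiteIO(log, "example.db", row_factory="Dict")
#   if db.db_connected():
#       db.execute_query("CREATE TABLE IF NOT EXISTS t (id INTEGER PRIMARY KEY, name TEXT)")
#       db.execute_query("INSERT INTO t (name) VALUES (?)", ("foo",))
#       db.execute_query("SELECT * FROM t")  # -> [{'id': 1, 'name': 'foo'}]
#       db.db_close()
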
# __END__
76 src/corelibs/debug_handling/debug_helpers.py Normal file
@@ -0,0 +1,76 @@
"""
|
||||
Various debug helpers
|
||||
"""
|
||||
|
||||
import traceback
|
||||
import os
|
||||
import sys
|
||||
from typing import Tuple, Type
|
||||
from types import TracebackType
|
||||
|
||||
# _typeshed.OptExcInfo
|
||||
OptExcInfo = Tuple[None, None, None] | Tuple[Type[BaseException], BaseException, TracebackType]
|
||||
|
||||
|
||||
def call_stack(
|
||||
start: int = 0,
|
||||
skip_last: int = -1,
|
||||
separator: str = ' -> ',
|
||||
reset_start_if_empty: bool = False
|
||||
) -> str:
|
||||
"""
|
||||
get the trace for the last entry
|
||||
|
||||
Keyword Arguments:
|
||||
start {int} -- start, if too might output will empty until reset_start_if_empty is set (default: {0})
|
||||
skip_last {int} -- how many of the last are skipped, defaults to -1 for current method (default: {-1})
|
||||
seperator {str} -- add stack separator, if empty defaults to ' -> ' (default: { -> })
|
||||
reset_start_if_empty {bool} -- if no stack returned because of too high start,
|
||||
reset to 0 for full read (default: {False})
|
||||
|
||||
Returns:
|
||||
str -- _description_
|
||||
"""
|
||||
# stack = traceback.extract_stack()[start:depth]
|
||||
# how many of the last entries we skip (so we do not get self), default is -1
|
||||
# start cannot be negative
|
||||
if skip_last > 0:
|
||||
skip_last = skip_last * -1
|
||||
stack = traceback.extract_stack()
|
||||
__stack = stack[start:skip_last]
|
||||
# start possible to high, reset start to 0
|
||||
if not __stack and reset_start_if_empty:
|
||||
start = 0
|
||||
__stack = stack[start:skip_last]
|
||||
if not separator:
|
||||
separator = ' -> '
|
||||
# print(f"* HERE: {dump_data(stack)}")
|
||||
return f"{separator}".join(f"{os.path.basename(f.filename)}:{f.name}:{f.lineno}" for f in __stack)
|
||||
|
||||
|
||||
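# Usage sketch: called inside a function it returns the "file:function:line" chain, e.g.
#   def inner():
#       return call_stack()
#   inner()  # -> "script.py:<module>:5 -> script.py:inner:2" (line numbers depend on the file)
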
def exception_stack(
    exc_stack: OptExcInfo | None = None,
    separator: str = ' -> '
) -> str:
    """
    Exception traceback, if no sys.exc_info is set, run internal

    Keyword Arguments:
        exc_stack {OptExcInfo | None} -- exception info tuple as returned by sys.exc_info() (default: {None})
        separator {str} -- stack entry separator (default: {' -> '})

    Returns:
        str -- exception stack as "file:function:line" entries joined by the separator
    """
    if exc_stack is not None:
        _, _, exc_traceback = exc_stack
    else:
        _, _, exc_traceback = sys.exc_info()
    stack = traceback.extract_tb(exc_traceback)
    if not separator:
        separator = ' -> '
    # print(f"* HERE: {dump_data(stack)}")
    return f"{separator}".join(f"{os.path.basename(f.filename)}:{f.name}:{f.lineno}" for f in stack)

# __END__
@@ -6,7 +6,7 @@ import json
 from typing import Any


-def dump_data(data: dict[Any, Any] | list[Any] | str | None) -> str:
+def dump_data(data: Any, use_indent: bool = True) -> str:
     """
     dump formated output from dict/list

@@ -16,6 +16,7 @@ def dump_data(data: dict[Any, Any] | list[Any] | str | None) -> str:
     Returns:
         str: _description_
     """
-    return json.dumps(data, indent=4, ensure_ascii=False, default=str)
+    indent = 4 if use_indent else None
+    return json.dumps(data, indent=indent, ensure_ascii=False, default=str)

 # __END__
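# Usage sketch for the new dump_data signature:
#   dump_data({"a": 1})                    # pretty-printed JSON with indent=4
#   dump_data({"a": 1}, use_indent=False)  # compact single-line JSON
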
@@ -4,10 +4,10 @@ Various small helpers for data writing

 from typing import TYPE_CHECKING
 if TYPE_CHECKING:
-    from io import TextIOWrapper
+    from io import TextIOWrapper, StringIO


-def write_l(line: str, fpl: 'TextIOWrapper | None' = None, print_line: bool = False):
+def write_l(line: str, fpl: 'TextIOWrapper | StringIO | None' = None, print_line: bool = False):
     """
     Write a line to screen and to output file

0 src/corelibs/email_handling/__init__.py Normal file
199 src/corelibs/email_handling/send_email.py Normal file
@@ -0,0 +1,199 @@
"""
|
||||
Send email wrapper
|
||||
"""
|
||||
|
||||
import smtplib
|
||||
from email.message import EmailMessage
|
||||
from typing import TYPE_CHECKING, Any
|
||||
if TYPE_CHECKING:
|
||||
from corelibs.logging_handling.log import Logger
|
||||
|
||||
|
||||
class SendEmail:
|
||||
"""
|
||||
send emails based on a template to a list of receivers
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
log: "Logger",
|
||||
settings: dict[str, Any],
|
||||
template: dict[str, str],
|
||||
from_email: str,
|
||||
combined_send: bool = True,
|
||||
receivers: list[str] | None = None,
|
||||
data: list[dict[str, str]] | None = None,
|
||||
):
|
||||
"""
|
||||
init send email class
|
||||
|
||||
Args:
|
||||
template (dict): Dictionary with body and subject
|
||||
from_email (str): from email as "Name" <email>
|
||||
combined_send (bool): True for sending as one set for all receivers
|
||||
receivers (list): list of emails to send to
|
||||
data (dict): data to replace in template
|
||||
args (Namespace): _description_
|
||||
"""
|
||||
self.log = log
|
||||
self.settings = settings
|
||||
# internal settings
|
||||
self.template = template
|
||||
self.from_email = from_email
|
||||
self.combined_send = combined_send
|
||||
self.receivers = receivers
|
||||
self.data = data
|
||||
|
||||
def send_email(
|
||||
self,
|
||||
data: list[dict[str, str]] | None,
|
||||
receivers: list[str] | None,
|
||||
template: dict[str, str] | None = None,
|
||||
from_email: str | None = None,
|
||||
combined_send: bool | None = None,
|
||||
test_only: bool | None = None
|
||||
):
|
||||
"""
|
||||
build email and send
|
||||
|
||||
Arguments:
|
||||
data {list[dict[str, str]] | None} -- _description_
|
||||
receivers {list[str] | None} -- _description_
|
||||
combined_send {bool | None} -- _description_
|
||||
|
||||
Keyword Arguments:
|
||||
template {dict[str, str] | None} -- _description_ (default: {None})
|
||||
from_email {str | None} -- _description_ (default: {None})
|
||||
|
||||
Raises:
|
||||
ValueError: _description_
|
||||
ValueError: _description_
|
||||
"""
|
||||
if data is None and self.data is not None:
|
||||
data = self.data
|
||||
if data is None:
|
||||
raise ValueError("No replace data set, cannot send email")
|
||||
if receivers is None and self.receivers is not None:
|
||||
receivers = self.receivers
|
||||
if receivers is None:
|
||||
raise ValueError("No receivers list set, cannot send email")
|
||||
if combined_send is None:
|
||||
combined_send = self.combined_send
|
||||
if test_only is not None:
|
||||
self.settings['test'] = test_only
|
||||
|
||||
if template is None:
|
||||
template = self.template
|
||||
if from_email is None:
|
||||
from_email = self.from_email
|
||||
|
||||
if not template['subject'] or not template['body']:
|
||||
raise ValueError("Both Subject and Body must be set")
|
||||
|
||||
self.log.debug(
|
||||
"[EMAIL]:\n"
|
||||
f"Subject: {template['subject']}\n"
|
||||
f"Body: {template['body']}\n"
|
||||
f"From: {from_email}\n"
|
||||
f"Combined send: {combined_send}\n"
|
||||
f"Receivers: {receivers}\n"
|
||||
f"Replace data: {data}"
|
||||
)
|
||||
|
||||
# send email
|
||||
self.send_email_list(
|
||||
self.prepare_email_content(
|
||||
from_email, template, data
|
||||
),
|
||||
receivers,
|
||||
combined_send,
|
||||
test_only
|
||||
)
|
||||
|
||||
def prepare_email_content(
|
||||
self,
|
||||
from_email: str,
|
||||
template: dict[str, str],
|
||||
data: list[dict[str, str]],
|
||||
) -> list[EmailMessage]:
|
||||
"""
|
||||
prepare email for sending
|
||||
|
||||
Args:
|
||||
template (dict): template data for this email
|
||||
data (dict): data to replace in email
|
||||
|
||||
Returns:
|
||||
list: Email Message Objects as list
|
||||
"""
|
||||
_subject = ""
|
||||
_body = ""
|
||||
msg: list[EmailMessage] = []
|
||||
for replace in data:
|
||||
_subject = template["subject"]
|
||||
_body = template["body"]
|
||||
for key, value in replace.items():
|
||||
_subject = _subject.replace(f"{{{{{key}}}}}", value)
|
||||
_body = _body.replace(f"{{{{{key}}}}}", value)
|
||||
# create a simple email and add subhect, from email
|
||||
msg_email = EmailMessage()
|
||||
# msg.set_content(_body, charset='utf-8', cte='quoted-printable')
|
||||
msg_email.set_content(_body, charset="utf-8")
|
||||
msg_email["Subject"] = _subject
|
||||
msg_email["From"] = from_email
|
||||
# push to array for sening
|
||||
msg.append(msg_email)
|
||||
return msg
|
||||
|
||||
def send_email_list(
|
||||
self,
|
||||
email: list[EmailMessage], receivers: list[str],
|
||||
combined_send: bool | None = None,
|
||||
test_only: bool | None = None
|
||||
):
|
||||
"""
|
||||
send email to receivers list
|
||||
|
||||
Args:
|
||||
email (list): Email Message object with set obdy, subject, from as list
|
||||
receivers (array): email receivers list as array
|
||||
combined_send (bool): True for sending as one set for all receivers
|
||||
"""
|
||||
|
||||
if test_only is not None:
|
||||
self.settings['test'] = test_only
|
||||
|
||||
# localhost (postfix does the rest)
|
||||
smtp = None
|
||||
smtp_host = self.settings.get('smtp_host', "localhost")
|
||||
try:
|
||||
smtp = smtplib.SMTP(smtp_host)
|
||||
except ConnectionRefusedError as e:
|
||||
self.log.error("Could not open SMTP connection to: %s, %s", smtp_host, e)
|
||||
# loop over messages and then over recievers
|
||||
for msg in email:
|
||||
if combined_send is True:
|
||||
msg["To"] = ", ".join(receivers)
|
||||
if not self.settings.get('test'):
|
||||
if smtp is not None:
|
||||
smtp.send_message(msg, msg["From"], receivers)
|
||||
else:
|
||||
self.log.info(f"[EMAIL] Test, not sending email\n{msg}")
|
||||
else:
|
||||
for receiver in receivers:
|
||||
# send to
|
||||
self.log.debug(f"===> Send to: {receiver}")
|
||||
if "To" in msg:
|
||||
msg.replace_header("To", receiver)
|
||||
else:
|
||||
msg["To"] = receiver
|
||||
if not self.settings.get('test'):
|
||||
if smtp is not None:
|
||||
smtp.send_message(msg)
|
||||
else:
|
||||
self.log.info(f"[EMAIL] Test, not sending email\n{msg}")
|
||||
# close smtp
|
||||
if smtp is not None:
|
||||
smtp.quit()
|
||||
|
||||
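# Usage sketch (assuming a Logger instance named log; with "test" set no mail leaves the host):
#   mailer = SendEmail(
#       log, settings={"smtp_host": "localhost", "test": True},
#       template={"subject": "Hi {{name}}", "body": "Hello {{name}}"},
#       from_email='"Sender" <sender@example.com>',
#   )
#   mailer.send_email(data=[{"name": "Alice"}], receivers=["alice@example.com"])
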
# __END__
0 src/corelibs/encryption_handling/__init__.py Normal file
152 src/corelibs/encryption_handling/symmetric_encryption.py Normal file
@@ -0,0 +1,152 @@
"""
|
||||
simple symmetric encryption
|
||||
Will be moved to CoreLibs
|
||||
TODO: set key per encryption run
|
||||
"""
|
||||
|
||||
import os
|
||||
import json
|
||||
import base64
|
||||
import hashlib
|
||||
from typing import TypedDict, cast
|
||||
from cryptography.fernet import Fernet
|
||||
from cryptography.hazmat.primitives import hashes
|
||||
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
|
||||
|
||||
|
||||
class PackageData(TypedDict):
|
||||
"""encryption package"""
|
||||
encrypted_data: str
|
||||
salt: str
|
||||
key_hash: str
|
||||
|
||||
|
||||
class SymmetricEncryption:
|
||||
"""
|
||||
simple encryption
|
||||
|
||||
the encrypted package has "encrypted_data" and "salt" as fields, salt is needed to create the
|
||||
key from the password to decrypt
|
||||
"""
|
||||
|
||||
def __init__(self, password: str):
|
||||
if not password:
|
||||
raise ValueError("A password must be set")
|
||||
self.password = password
|
||||
self.password_hash = hashlib.sha256(password.encode('utf-8')).hexdigest()
|
||||
|
||||
def __derive_key_from_password(self, password: str, salt: bytes) -> bytes:
|
||||
_password = password.encode('utf-8')
|
||||
kdf = PBKDF2HMAC(
|
||||
algorithm=hashes.SHA256(),
|
||||
length=32,
|
||||
salt=salt,
|
||||
iterations=100000,
|
||||
)
|
||||
key = base64.urlsafe_b64encode(kdf.derive(_password))
|
||||
return key
|
||||
|
||||
def __encrypt_with_metadata(self, data: str | bytes) -> PackageData:
|
||||
"""Encrypt data and include salt if password-based"""
|
||||
# convert to bytes (for encoding)
|
||||
if isinstance(data, str):
|
||||
data = data.encode('utf-8')
|
||||
|
||||
# generate salt and key from password
|
||||
salt = os.urandom(16)
|
||||
key = self.__derive_key_from_password(self.password, salt)
|
||||
# init the cypher suit
|
||||
cipher_suite = Fernet(key)
|
||||
|
||||
encrypted_data = cipher_suite.encrypt(data)
|
||||
|
||||
# If using password, include salt in the result
|
||||
return {
|
||||
'encrypted_data': base64.urlsafe_b64encode(encrypted_data).decode('utf-8'),
|
||||
'salt': base64.urlsafe_b64encode(salt).decode('utf-8'),
|
||||
'key_hash': hashlib.sha256(key).hexdigest()
|
||||
}
|
||||
|
||||
def encrypt_with_metadata(self, data: str | bytes, return_as: str = 'str') -> str | bytes | PackageData:
|
||||
"""encrypt with metadata, but returns data in string"""
|
||||
match return_as:
|
||||
case 'str':
|
||||
return self.encrypt_with_metadata_return_str(data)
|
||||
case 'json':
|
||||
return self.encrypt_with_metadata_return_str(data)
|
||||
case 'bytes':
|
||||
return self.encrypt_with_metadata_return_bytes(data)
|
||||
case 'dict':
|
||||
return self.encrypt_with_metadata_return_dict(data)
|
||||
case _:
|
||||
# default is string json
|
||||
return self.encrypt_with_metadata_return_str(data)
|
||||
|
||||
def encrypt_with_metadata_return_dict(self, data: str | bytes) -> PackageData:
|
||||
"""encrypt with metadata, but returns data as PackageData dict"""
|
||||
return self.__encrypt_with_metadata(data)
|
||||
|
||||
def encrypt_with_metadata_return_str(self, data: str | bytes) -> str:
|
||||
"""encrypt with metadata, but returns data in string"""
|
||||
return json.dumps(self.__encrypt_with_metadata(data))
|
||||
|
||||
def encrypt_with_metadata_return_bytes(self, data: str | bytes) -> bytes:
|
||||
"""encrypt with metadata, but returns data in bytes"""
|
||||
return json.dumps(self.__encrypt_with_metadata(data)).encode('utf-8')
|
||||
|
||||
def decrypt_with_metadata(self, encrypted_package: str | bytes | PackageData, password: str | None = None) -> str:
|
||||
"""Decrypt data that may include metadata"""
|
||||
try:
|
||||
# Try to parse as JSON (password-based encryption)
|
||||
if isinstance(encrypted_package, bytes):
|
||||
package_data = cast(PackageData, json.loads(encrypted_package.decode('utf-8')))
|
||||
elif isinstance(encrypted_package, str):
|
||||
package_data = cast(PackageData, json.loads(str(encrypted_package)))
|
||||
else:
|
||||
package_data = encrypted_package
|
||||
|
||||
encrypted_data = base64.urlsafe_b64decode(package_data['encrypted_data'])
|
||||
salt = base64.urlsafe_b64decode(package_data['salt'])
|
||||
pwd = password or self.password
|
||||
key = self.__derive_key_from_password(pwd, salt)
|
||||
if package_data['key_hash'] != hashlib.sha256(key).hexdigest():
|
||||
raise ValueError("Key hash is not matching, possible invalid password")
|
||||
cipher_suite = Fernet(key)
|
||||
decrypted_data = cipher_suite.decrypt(encrypted_data)
|
||||
|
||||
except (json.JSONDecodeError, KeyError, UnicodeDecodeError) as e:
|
||||
raise ValueError(f"Invalid encrypted package format {e}") from e
|
||||
|
||||
return decrypted_data.decode('utf-8')
|
||||
|
||||
@staticmethod
|
||||
def encrypt_data(data: str | bytes, password: str) -> str:
|
||||
"""
|
||||
Static method to encrypt some data
|
||||
|
||||
Arguments:
|
||||
data {str | bytes} -- _description_
|
||||
password {str} -- _description_
|
||||
|
||||
Returns:
|
||||
str -- _description_
|
||||
"""
|
||||
encryptor = SymmetricEncryption(password)
|
||||
return encryptor.encrypt_with_metadata_return_str(data)
|
||||
|
||||
@staticmethod
|
||||
def decrypt_data(data: str | bytes | PackageData, password: str) -> str:
|
||||
"""
|
||||
Static method to decrypt some data
|
||||
|
||||
Arguments:
|
||||
data {str | bytes | PackageData} -- _description_
|
||||
password {str} -- _description_
|
||||
|
||||
Returns:
|
||||
str -- _description_
|
||||
"""
|
||||
decryptor = SymmetricEncryption(password)
|
||||
return decryptor.decrypt_with_metadata(data, password=password)
|
||||
|
||||
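# Usage sketch:
#   token = SymmetricEncryption.encrypt_data("secret text", "my password")
#   SymmetricEncryption.decrypt_data(token, "my password")     # -> "secret text"
#   SymmetricEncryption.decrypt_data(token, "wrong password")  # raises ValueError, key hash mismatch
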
# __END__
23 src/corelibs/exceptions/csv_exceptions.py Normal file
@@ -0,0 +1,23 @@
"""
|
||||
Exceptions for csv file reading and processing
|
||||
"""
|
||||
|
||||
|
||||
class NoCsvReader(Exception):
|
||||
"""
|
||||
CSV reader is none
|
||||
"""
|
||||
|
||||
|
||||
class CsvHeaderDataMissing(Exception):
|
||||
"""
|
||||
The csv reader returned None as headers, the header column in the csv file is missing
|
||||
"""
|
||||
|
||||
|
||||
class CompulsoryCsvHeaderCheckFailed(Exception):
|
||||
"""
|
||||
raise if the header is not matching to the excpeted values
|
||||
"""
|
||||
|
||||
# __END__
|
||||
75 src/corelibs/file_handling/file_bom_encoding.py Normal file
@@ -0,0 +1,75 @@
"""
|
||||
File check if BOM encoded, needed for CSV load
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
from typing import TypedDict
|
||||
|
||||
|
||||
class BomEncodingInfo(TypedDict):
|
||||
"""BOM encoding info"""
|
||||
has_bom: bool
|
||||
bom_type: str | None
|
||||
encoding: str | None
|
||||
bom_length: int
|
||||
bom_pattern: bytes | None
|
||||
|
||||
|
||||
def is_bom_encoded(file_path: Path) -> bool:
|
||||
"""
|
||||
Detect if a file is BOM encoded
|
||||
|
||||
Args:
|
||||
file_path (str): Path to the file to check
|
||||
|
||||
Returns:
|
||||
bool: True if file has BOM, False otherwise
|
||||
"""
|
||||
return is_bom_encoded_info(file_path)['has_bom']
|
||||
|
||||
|
||||
def is_bom_encoded_info(file_path: Path) -> BomEncodingInfo:
|
||||
"""
|
||||
Enhanced BOM detection with additional file analysis
|
||||
|
||||
Args:
|
||||
file_path (str): Path to the file to check
|
||||
|
||||
Returns:
|
||||
dict: Comprehensive BOM and encoding information
|
||||
"""
|
||||
try:
|
||||
# Read first 1024 bytes for analysis
|
||||
with open(file_path, 'rb') as f:
|
||||
header = f.read(4)
|
||||
|
||||
bom_patterns = {
|
||||
b'\xef\xbb\xbf': ('UTF-8', 'utf-8', 3),
|
||||
b'\xff\xfe\x00\x00': ('UTF-32 LE', 'utf-32-le', 4),
|
||||
b'\x00\x00\xfe\xff': ('UTF-32 BE', 'utf-32-be', 4),
|
||||
b'\xff\xfe': ('UTF-16 LE', 'utf-16-le', 2),
|
||||
b'\xfe\xff': ('UTF-16 BE', 'utf-16-be', 2),
|
||||
}
|
||||
|
||||
for bom_pattern, (encoding_name, encoding, length) in bom_patterns.items():
|
||||
if header.startswith(bom_pattern):
|
||||
return {
|
||||
'has_bom': True,
|
||||
'bom_type': encoding_name,
|
||||
'encoding': encoding,
|
||||
'bom_length': length,
|
||||
'bom_pattern': bom_pattern
|
||||
}
|
||||
|
||||
return {
|
||||
'has_bom': False,
|
||||
'bom_type': None,
|
||||
'encoding': None,
|
||||
'bom_length': 0,
|
||||
'bom_pattern': None
|
||||
}
|
||||
except Exception as e:
|
||||
raise ValueError(f"Error checking BOM encoding: {e}") from e
|
||||
|
||||
|
||||
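# Usage sketch:
#   info = is_bom_encoded_info(Path("data.csv"))
#   # e.g. {'has_bom': True, 'bom_type': 'UTF-8', 'encoding': 'utf-8', 'bom_length': 3, ...}
#   if is_bom_encoded(Path("data.csv")):
#       pass  # e.g. open with encoding="utf-8-sig" to skip the BOM
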
# __END__
@@ -7,7 +7,12 @@ import shutil
 from pathlib import Path


-def remove_all_in_directory(directory: Path, ignore_files: list[str] | None = None, verbose: bool = False) -> bool:
+def remove_all_in_directory(
+    directory: Path,
+    ignore_files: list[str] | None = None,
+    verbose: bool = False,
+    dry_run: bool = False
+) -> bool:
     """
     remove all files and folders in a directory
     can exclude files or folders
@@ -24,7 +29,10 @@ def remove_all_in_directory(directory: Path, ignore_files: list[str] | None = No
     if ignore_files is None:
         ignore_files = []
     if verbose:
-        print(f"Remove old files in: {directory.name} [", end="", flush=True)
+        print(
+            f"{'[DRY RUN] ' if dry_run else ''}Remove old files in: {directory.name} [",
+            end="", flush=True
+        )
     # remove all files and folders in given directory by recursive globbing
     for file in directory.rglob("*"):
         # skip if in ignore files
@@ -32,11 +40,13 @@ def remove_all_in_directory(directory: Path, ignore_files: list[str] | None = No
             continue
         # remove one file, or a whole directory
         if file.is_file():
-            os.remove(file)
+            if not dry_run:
+                os.remove(file)
             if verbose:
                 print(".", end="", flush=True)
         elif file.is_dir():
-            shutil.rmtree(file)
+            if not dry_run:
+                shutil.rmtree(file)
             if verbose:
                 print("/", end="", flush=True)
     if verbose:
0 src/corelibs/iterator_handling/__init__.py Normal file
@@ -2,23 +2,41 @@
 wrapper around search path
 """

-from typing import Any
+from typing import Any, TypedDict, NotRequired
+from warnings import deprecated
+
+
+class ArraySearchList(TypedDict):
+    """find in array from list search dict"""
+    key: str
+    value: str | bool | int | float | list[str | None]
+    case_sensitive: NotRequired[bool]
+
+
+@deprecated("Use find_in_array_from_list()")
 def array_search(
-    search_params: list[dict[str, str | bool | list[str | None]]],
+    search_params: list[ArraySearchList],
     data: list[dict[str, Any]],
     return_index: bool = False
 ) -> list[dict[str, Any]]:
+    """deprecated, old call order"""
+    return find_in_array_from_list(data, search_params, return_index)
+
+
+def find_in_array_from_list(
+    data: list[dict[str, Any]],
+    search_params: list[ArraySearchList],
+    return_index: bool = False
+) -> list[dict[str, Any]]:
     """
-    search in an array of dicts with an array of Key/Value set
+    search in a list of dicts with a list of Key/Value sets
     all Key/Value sets must match
     Value set can be list for OR match
+    option: case_sensitive: default True

     Args:
-        search_params (list): List of search params in "Key"/"Value" lists with options
         data (list): data to search in, must be a list
+        search_params (list): List of search params in "key"/"value" lists with options
     return_index (bool): return index of list [default False]

     Raises:
@@ -32,18 +50,20 @@ def array_search(
     """
     if not isinstance(search_params, list):  # type: ignore
         raise ValueError("search_params must be a list")
-    keys = []
+    keys: list[str] = []
     # check that key and value exist and are set
     for search in search_params:
-        if not search.get('Key') or not search.get('Value'):
+        if not search.get('key') or not search.get('value'):
             raise KeyError(
-                f"Either Key '{search.get('Key', '')}' or "
-                f"Value '{search.get('Value', '')}' is missing or empty"
+                f"Either Key '{search.get('key', '')}' or "
+                f"Value '{search.get('value', '')}' is missing or empty"
             )
         # if double key -> abort
-        if search.get("Key") in keys:
+        if search.get("key") in keys:
             raise KeyError(
-                f"Key {search.get('Key', '')} already exists in search_params"
+                f"Key {search.get('key', '')} already exists in search_params"
             )
+        keys.append(str(search['key']))

     return_items: list[dict[str, Any]] = []
     for si_idx, search_item in enumerate(data):
@@ -55,20 +75,20 @@ def array_search(
             # lower case left side
             # TODO: allow nested Keys. eg "Key: ["Key a", "key b"]" to be ["Key a"]["key b"]
             if search.get("case_sensitive", True) is False:
-                search_value = search_item.get(str(search['Key']), "").lower()
+                search_value = search_item.get(str(search['key']), "").lower()
             else:
-                search_value = search_item.get(str(search['Key']), "")
+                search_value = search_item.get(str(search['key']), "")
             # lower case right side
-            if isinstance(search['Value'], list):
+            if isinstance(search['value'], list):
                 search_in = [
-                    str(k).lower()
-                    if search.get("case_sensitive", True) is False else k
-                    for k in search['Value']
-                ]
+                    str(k).lower()
+                    if search.get("case_sensitive", True) is False else k
+                    for k in search['value']
+                ]
             elif search.get("case_sensitive", True) is False:
-                search_in = str(search['Value']).lower()
+                search_in = str(search['value']).lower()
             else:
-                search_in = search['Value']
+                search_in = search['value']
             # compare check
             if (
                 (
@@ -60,4 +60,22 @@ def build_dict(
         delete_keys_from_set(any_dict, ignore_entries)
     )


+def set_entry(dict_set: dict[str, Any], key: str, value_set: Any) -> dict[str, Any]:
+    """
+    set a new entry in the dict set
+
+    Arguments:
+        dict_set {dict[str, Any]} -- dict to update
+        key {str} -- key to set
+        value_set {Any} -- value to set for the key
+
+    Returns:
+        dict[str, Any] -- the updated dict set
+    """
+    if not dict_set.get(key):
+        dict_set[key] = {}
+    dict_set[key] = value_set
+    return dict_set

 # __END__
85 src/corelibs/iterator_handling/dict_mask.py Normal file
@@ -0,0 +1,85 @@
"""
|
||||
Dict helpers
|
||||
"""
|
||||
|
||||
|
||||
from typing import TypeAlias, Union, Dict, List, Any, cast
|
||||
|
||||
# definitions for the mask run below
|
||||
MaskableValue: TypeAlias = Union[str, int, float, bool, None]
|
||||
NestedDict: TypeAlias = Dict[str, Union[MaskableValue, List[Any], 'NestedDict']]
|
||||
ProcessableValue: TypeAlias = Union[MaskableValue, List[Any], NestedDict]
|
||||
|
||||
|
||||
def mask(
|
||||
data_set: dict[str, Any],
|
||||
mask_keys: list[str] | None = None,
|
||||
mask_str: str = "***",
|
||||
mask_str_edges: str = '_',
|
||||
skip: bool = False
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
mask data for output
|
||||
Checks if mask_keys list exist in any key in the data set either from the start or at the end
|
||||
|
||||
Use the mask_str_edges to define how searches inside a string should work. Default it must start
|
||||
and end with '_', remove to search string in string
|
||||
|
||||
Arguments:
|
||||
data_set {dict[str, str]} -- _description_
|
||||
|
||||
Keyword Arguments:
|
||||
mask_keys {list[str] | None} -- _description_ (default: {None})
|
||||
mask_str {str} -- _description_ (default: {"***"})
|
||||
mask_str_edges {str} -- _description_ (default: {"_"})
|
||||
skip {bool} -- if set to true skip (default: {False})
|
||||
|
||||
Returns:
|
||||
dict[str, str] -- _description_
|
||||
"""
|
||||
if skip is True:
|
||||
return data_set
|
||||
if mask_keys is None:
|
||||
mask_keys = ["encryption", "password", "secret"]
|
||||
else:
|
||||
# make sure it is lower case
|
||||
mask_keys = [mask_key.lower() for mask_key in mask_keys]
|
||||
|
||||
def should_mask_key(key: str) -> bool:
|
||||
"""Check if a key should be masked"""
|
||||
__key_lower = key.lower()
|
||||
return any(
|
||||
__key_lower.startswith(mask_key) or
|
||||
__key_lower.endswith(mask_key) or
|
||||
f"{mask_str_edges}{mask_key}{mask_str_edges}" in __key_lower
|
||||
for mask_key in mask_keys
|
||||
)
|
||||
|
||||
def mask_recursive(obj: ProcessableValue) -> ProcessableValue:
|
||||
"""Recursively mask values in nested structures"""
|
||||
if isinstance(obj, dict):
|
||||
return {
|
||||
key: mask_value(value) if should_mask_key(key) else mask_recursive(value)
|
||||
for key, value in obj.items()
|
||||
}
|
||||
if isinstance(obj, list):
|
||||
return [mask_recursive(item) for item in obj]
|
||||
return obj
|
||||
|
||||
def mask_value(value: Any) -> Any:
|
||||
"""Handle masking based on value type"""
|
||||
if isinstance(value, list):
|
||||
# Mask each individual value in the list
|
||||
return [mask_str for _ in cast('list[Any]', value)]
|
||||
if isinstance(value, dict):
|
||||
# Recursively process the dictionary instead of masking the whole thing
|
||||
return mask_recursive(cast('ProcessableValue', value))
|
||||
# Mask primitive values
|
||||
return mask_str
|
||||
|
||||
return {
|
||||
key: mask_value(value) if should_mask_key(key) else mask_recursive(value)
|
||||
for key, value in data_set.items()
|
||||
}
|
||||
|
||||
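# Usage sketch:
#   mask({"user": "a", "password": "x", "nested": {"api_secret_key": "y"}})
#   # -> {'user': 'a', 'password': '***', 'nested': {'api_secret_key': '***'}}
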
# __END__
47 src/corelibs/iterator_handling/list_helpers.py Normal file
@@ -0,0 +1,47 @@
"""
|
||||
List type helpers
|
||||
"""
|
||||
|
||||
from typing import Any, Sequence
|
||||
|
||||
|
||||
def convert_to_list(
|
||||
entry: str | int | float | bool | Sequence[str | int | float | bool | Sequence[Any]]
|
||||
) -> Sequence[str | int | float | bool | Sequence[Any]]:
|
||||
"""
|
||||
Convert any of the non list values (except dictionary) to a list
|
||||
|
||||
Arguments:
|
||||
entry {str | int | float | bool | list[str | int | float | bool]} -- _description_
|
||||
|
||||
Returns:
|
||||
list[str | int | float | bool] -- _description_
|
||||
"""
|
||||
if isinstance(entry, list):
|
||||
return entry
|
||||
return [entry]
|
||||
|
||||
|
||||
def is_list_in_list(
|
||||
list_a: Sequence[str | int | float | bool | Sequence[Any]],
|
||||
list_b: Sequence[str | int | float | bool | Sequence[Any]]
|
||||
) -> Sequence[str | int | float | bool | Sequence[Any]]:
|
||||
"""
|
||||
Return entries from list_a that are not in list_b
|
||||
Type safe compare
|
||||
|
||||
Arguments:
|
||||
list_a {list[Any]} -- _description_
|
||||
list_b {list[Any]} -- _description_
|
||||
|
||||
Returns:
|
||||
list[Any] -- _description_
|
||||
"""
|
||||
# Create sets of (value, type) tuples
|
||||
set_a = set((item, type(item)) for item in list_a)
|
||||
set_b = set((item, type(item)) for item in list_b)
|
||||
|
||||
# Get the difference and extract just the values
|
||||
return [item for item, _ in set_a - set_b]
|
||||
|
||||
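# Usage sketch:
#   convert_to_list("a")                    # -> ["a"]
#   is_list_in_list([1, "1", 2], ["1", 2])  # -> [1], type safe: int 1 is not str "1"
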
# __END__
@@ -28,8 +28,12 @@ def jmespath_search(search_data: dict[Any, Any] | list[Any], search_params: str)
         raise ValueError(f"Compile failed: {search_params}: {excp}") from excp
     except jmespath.exceptions.ParseError as excp:
         raise ValueError(f"Parse failed: {search_params}: {excp}") from excp
+    except jmespath.exceptions.JMESPathTypeError as excp:
+        raise ValueError(f"Search failed with JMESPathTypeError: {search_params}: {excp}") from excp
+    except TypeError as excp:
+        raise ValueError(f"Type error for search_params: {excp}") from excp
     return search_result

+# TODO: compile jmespath setup

 # __END__
@@ -3,15 +3,17 @@ json encoder for datetime
 """

 from typing import Any
-from json import JSONEncoder
+from json import JSONEncoder, dumps
 from datetime import datetime, date
+import copy
+from jsonpath_ng import parse  # pyright: ignore[reportMissingTypeStubs, reportUnknownVariableType]


 # subclass JSONEncoder
 class DateTimeEncoder(JSONEncoder):
     """
     Override the default method
-    cls=DateTimeEncoder
+    dumps(..., cls=DateTimeEncoder, ...)
     """
     def default(self, o: Any) -> str | None:
         if isinstance(o, (date, datetime)):
@@ -19,13 +21,44 @@ class DateTimeEncoder(JSONEncoder):
         return None


-def default(obj: Any) -> str | None:
+def default_isoformat(obj: Any) -> str | None:
     """
     default override
-    default=default
+    dumps(..., default=default_isoformat, ...)
     """
     if isinstance(obj, (date, datetime)):
         return obj.isoformat()
     return None


+def json_dumps(data: Any):
+    """
+    wrapper for json.dumps for a sure dump without throwing Exceptions
+
+    Arguments:
+        data {Any} -- data to dump as JSON
+
+    Returns:
+        str -- JSON string
+    """
+    return dumps(data, ensure_ascii=False, default=str)
+
+
+def modify_with_jsonpath(data: dict[Any, Any], path: str, new_value: Any):
+    """
+    Modify dictionary using JSONPath (more powerful than JMESPath for modifications)
+    """
+    result = copy.deepcopy(data)
+    jsonpath_expr = parse(path)  # pyright: ignore[reportUnknownVariableType]
+
+    # Find and update all matches
+    matches = jsonpath_expr.find(result)  # pyright: ignore[reportUnknownMemberType, reportUnknownVariableType]
+    for match in matches:  # pyright: ignore[reportUnknownVariableType]
+        match.full_path.update(result, new_value)  # pyright: ignore[reportUnknownMemberType]
+
+    return result

 # __END__
@@ -1,37 +0,0 @@
-"""
-Dict helpers
-"""
-
-
-def mask(
-    data_set: dict[str, str],
-    mask_keys: list[str] | None = None,
-    mask_str: str = "***",
-    skip: bool = False
-) -> dict[str, str]:
-    """
-    mask data for output
-    Checks if mask_keys list exist in any key in the data set either from the start or at the end
-
-    Arguments:
-        data_set {dict[str, str]} -- _description_
-
-    Keyword Arguments:
-        mask_keys {list[str] | None} -- _description_ (default: {None})
-        mask_str {str} -- _description_ (default: {"***"})
-        skip {bool} -- _description_ (default: {False})
-
-    Returns:
-        dict[str, str] -- _description_
-    """
-    if skip is True:
-        return data_set
-    if mask_keys is None:
-        mask_keys = ["password", "secret"]
-    return {
-        key: mask_str
-        if any(key.startswith(mask_key) or key.endswith(mask_key) for mask_key in mask_keys) else value
-        for key, value in data_set.items()
-    }
-
-# __END__
@@ -1,91 +1,276 @@
"""
A log handler wrapper
if log_settings['log_queue'] is set to multiprocessing.Queue it will launch with listeners
attach "init_worker_logging" with the set log_queue
"""

import re
import logging.handlers
import logging
from datetime import datetime
import time
from pathlib import Path
-from typing import Mapping
import atexit
from enum import Flag, auto
from typing import MutableMapping, TextIO, TypedDict, Any, TYPE_CHECKING, cast
from corelibs.logging_handling.logging_level_handling.logging_level import LoggingLevel
from corelibs.string_handling.text_colors import Colors
from corelibs.debug_handling.debug_helpers import call_stack, exception_stack

if TYPE_CHECKING:
    from multiprocessing import Queue


-class Log:
+class ConsoleFormat(Flag):
    """console format type bitmap flags"""
    TIME = auto()
    TIME_SECONDS = auto()
    TIME_MILLISECONDS = auto()
    TIME_MICROSECONDS = auto()
    TIMEZONE = auto()
    NAME = auto()
    FILE = auto()
    FUNCTION = auto()
    LINENO = auto()


class ConsoleFormatSettings:
    """Console format quick settings groups"""
    # shows everything: time with milliseconds and time zone, log name, file, function, line number
    ALL = (
        ConsoleFormat.TIME |
        ConsoleFormat.TIMEZONE |
        ConsoleFormat.NAME |
        ConsoleFormat.FILE |
        ConsoleFormat.FUNCTION |
        ConsoleFormat.LINENO
    )
    # show time with no time zone, file and line
    CONDENSED = ConsoleFormat.TIME | ConsoleFormat.FILE | ConsoleFormat.LINENO
    # only time
    MINIMAL = ConsoleFormat.TIME
    # only message
    BARE = ConsoleFormat(0)

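# Usage sketch: the Flag bitmap combines with "|" and tests with "&"
#   fmt = ConsoleFormat.TIME | ConsoleFormat.FILE | ConsoleFormat.LINENO
#   bool(fmt & ConsoleFormat.FILE)          # -> True
#   fmt == ConsoleFormatSettings.CONDENSED  # -> True, same preset
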
# MARK: Log settings TypedDict
|
||||
class LogSettings(TypedDict):
|
||||
"""log settings, for Log setup"""
|
||||
log_level_console: LoggingLevel
|
||||
log_level_file: LoggingLevel
|
||||
per_run_log: bool
|
||||
console_enabled: bool
|
||||
console_color_output_enabled: bool
|
||||
console_format_type: ConsoleFormat
|
||||
add_start_info: bool
|
||||
add_end_info: bool
|
||||
log_queue: 'Queue[str] | None'
|
||||
|
||||
|
||||
class LoggerInit(TypedDict):
|
||||
"""for Logger init"""
|
||||
logger: logging.Logger
|
||||
log_queue: 'Queue[str] | None'
|
||||
|
||||
|
||||
# MARK: Custom color filter
|
||||
class CustomConsoleFormatter(logging.Formatter):
|
||||
"""
|
||||
logger setup
|
||||
Custom formatter with colors for console output
|
||||
"""
|
||||
|
||||
EXCEPTION: int = 60
|
||||
COLORS = {
|
||||
LoggingLevel.DEBUG.name: Colors.cyan,
|
||||
LoggingLevel.INFO.name: Colors.green,
|
||||
LoggingLevel.WARNING.name: Colors.yellow,
|
||||
LoggingLevel.ERROR.name: Colors.red,
|
||||
LoggingLevel.CRITICAL.name: Colors.red_bold,
|
||||
LoggingLevel.ALERT.name: Colors.yellow_bold,
|
||||
LoggingLevel.EMERGENCY.name: Colors.magenta_bold,
|
||||
LoggingLevel.EXCEPTION.name: Colors.magenta_bright, # will never be written to console
|
||||
}
|
||||
|
||||
def __init__(
|
||||
def format(self, record: logging.LogRecord) -> str:
|
||||
"""
|
||||
set the color highlight
|
||||
|
||||
Arguments:
|
||||
record {logging.LogRecord} -- _description_
|
||||
|
||||
Returns:
|
||||
str -- _description_
|
||||
"""
|
||||
# Add color to levelname for console output
|
||||
reset = Colors.reset
|
||||
color = self.COLORS.get(record.levelname, reset)
|
||||
# only highlight level for basic
|
||||
if record.levelname in [LoggingLevel.DEBUG.name, LoggingLevel.INFO.name]:
|
||||
record.levelname = f"{color}{record.levelname}{reset}"
|
||||
return super().format(record)
|
||||
# highlight whole line
|
||||
message = super().format(record)
|
||||
return f"{color}{message}{reset}"
|
||||
|
||||
# TODO: add custom handlers for stack_trace, if not set fill with %(filename)s:%(funcName)s:%(lineno)d
|
||||
# hasattr(record, 'stack_trace')
|
||||
# also for something like "context" where we add an array of anything to a message
|
||||
|
||||
|
||||
class CustomHandlerFilter(logging.Filter):
|
||||
"""
|
||||
Add a custom handler for filtering
|
||||
"""
|
||||
HANDLER_NAME_FILTER_EXCEPTION: str = 'console'
|
||||
|
||||
def __init__(self, handler_name: str, filter_exceptions: bool = False):
|
||||
super().__init__(name=handler_name)
|
||||
self.handler_name = handler_name
|
||||
self.filter_exceptions = filter_exceptions
|
||||
|
||||
def filter(self, record: logging.LogRecord) -> bool:
|
||||
# if console and exception do not show
|
||||
if self.handler_name == self.HANDLER_NAME_FILTER_EXCEPTION and self.filter_exceptions:
|
||||
return record.levelname != "EXCEPTION"
|
||||
# if the console flag is true and the target is the file handler, filter it out
|
||||
if hasattr(record, 'console') and getattr(record, 'console') is True and self.handler_name == 'file':
|
||||
return False
|
||||
return True
|
||||
|
||||
# def __filter_exceptions(self, record: logging.LogRecord) -> bool:
|
||||
# return record.levelname != "EXCEPTION"
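# Attachment sketch (illustrative): the filter is installed per handler, so a
# record logged with extra={'console': True} still reaches the console handler
# but is dropped by the handler named 'file':
#   console_handler.addFilter(CustomHandlerFilter('console', filter_exceptions=True))
#   file_handler.addFilter(CustomHandlerFilter('file'))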
|
||||
|
||||
|
||||
# MARK: Parent class
|
||||
class LogParent:
|
||||
"""
|
||||
Parent class with general methods
|
||||
used by Log and Logger
|
||||
"""
|
||||
|
||||
# spacer length in characters and the spacer character
|
||||
SPACER_CHAR: str = '='
|
||||
SPACER_LENGTH: int = 32
|
||||
|
||||
def __init__(self):
|
||||
self.logger: logging.Logger
|
||||
self.log_queue: 'Queue[str] | None' = None
|
||||
self.handlers: dict[str, Any] = {}
|
||||
|
||||
# FIXME: we need to add a custom formater to add stack level listing if we want to
|
||||
# Important note, although they exist, it is recommended to use self.logger.NAME directly
|
||||
# so that the correct filename, method and line number is set
|
||||
# for > 50 use logger.log(LoggingLevel.<LEVEL>.value, ...)
|
||||
# for exception logger.log(LoggingLevel.EXCEPTION.value, ..., exc_info=True)
|
||||
# MARK: log message
|
||||
def log(self, level: int, msg: object, *args: object, extra: MutableMapping[str, object] | None = None):
|
||||
"""log general"""
|
||||
if not hasattr(self, 'logger'):
|
||||
raise ValueError('Logger is not yet initialized')
|
||||
if extra is None:
|
||||
extra = {}
|
||||
extra['stack_trace'] = call_stack(skip_last=2)
|
||||
self.logger.log(level, msg, *args, extra=extra, stacklevel=2)
|
||||
|
||||
# MARK: DEBUG 10
|
||||
def debug(self, msg: object, *args: object, extra: MutableMapping[str, object] | None = None) -> None:
|
||||
"""debug"""
|
||||
if not hasattr(self, 'logger'):
|
||||
raise ValueError('Logger is not yet initialized')
|
||||
if extra is None:
|
||||
extra = {}
|
||||
extra['stack_trace'] = call_stack(skip_last=2)
|
||||
self.logger.debug(msg, *args, extra=extra, stacklevel=2)
|
||||
|
||||
# MARK: INFO 20
|
||||
def info(self, msg: object, *args: object, extra: MutableMapping[str, object] | None = None) -> None:
|
||||
"""info"""
|
||||
if not hasattr(self, 'logger'):
|
||||
raise ValueError('Logger is not yet initialized')
|
||||
if extra is None:
|
||||
extra = {}
|
||||
extra['stack_trace'] = call_stack(skip_last=2)
|
||||
self.logger.info(msg, *args, extra=extra, stacklevel=2)
|
||||
|
||||
# MARK: WARNING 30
|
||||
def warning(self, msg: object, *args: object, extra: MutableMapping[str, object] | None = None) -> None:
|
||||
"""warning"""
|
||||
if not hasattr(self, 'logger'):
|
||||
raise ValueError('Logger is not yet initialized')
|
||||
if extra is None:
|
||||
extra = {}
|
||||
extra['stack_trace'] = call_stack(skip_last=2)
|
||||
self.logger.warning(msg, *args, extra=extra, stacklevel=2)
|
||||
|
||||
# MARK: ERROR 40
|
||||
def error(self, msg: object, *args: object, extra: MutableMapping[str, object] | None = None) -> None:
|
||||
"""error"""
|
||||
if not hasattr(self, 'logger'):
|
||||
raise ValueError('Logger is not yet initialized')
|
||||
if extra is None:
|
||||
extra = {}
|
||||
extra['stack_trace'] = call_stack(skip_last=2)
|
||||
self.logger.error(msg, *args, extra=extra, stacklevel=2)
|
||||
|
||||
# MARK: CRITICAL 50
|
||||
def critical(self, msg: object, *args: object, extra: MutableMapping[str, object] | None = None) -> None:
|
||||
"""critcal"""
|
||||
if not hasattr(self, 'logger'):
|
||||
raise ValueError('Logger is not yet initialized')
|
||||
if extra is None:
|
||||
extra = {}
|
||||
extra['stack_trace'] = call_stack(skip_last=2)
|
||||
self.logger.critical(msg, *args, extra=extra, stacklevel=2)
|
||||
|
||||
# MARK: ALERT 55
|
||||
def alert(self, msg: object, *args: object, extra: MutableMapping[str, object] | None = None) -> None:
|
||||
"""alert"""
|
||||
if not hasattr(self, 'logger'):
|
||||
raise ValueError('Logger is not yet initialized')
|
||||
# extra_dict = dict(extra)
|
||||
if extra is None:
|
||||
extra = {}
|
||||
extra['stack_trace'] = call_stack(skip_last=2)
|
||||
self.logger.log(LoggingLevel.ALERT.value, msg, *args, extra=extra, stacklevel=2)
|
||||
|
||||
# MARK: EMERGENCY: 60
|
||||
def emergency(self, msg: object, *args: object, extra: MutableMapping[str, object] | None = None) -> None:
|
||||
"""emergency"""
|
||||
if not hasattr(self, 'logger'):
|
||||
raise ValueError('Logger is not yet initialized')
|
||||
if extra is None:
|
||||
extra = {}
|
||||
extra['stack_trace'] = call_stack(skip_last=2)
|
||||
self.logger.log(LoggingLevel.EMERGENCY.value, msg, *args, extra=extra, stacklevel=2)
|
||||
|
||||
# MARK: EXCEPTION: 70
|
||||
def exception(
|
||||
self,
|
||||
log_path: Path,
|
||||
log_name: str,
|
||||
log_level_console: str = 'WARNING',
|
||||
log_level_file: str = 'DEBUG',
|
||||
add_start_info: bool = True
|
||||
):
|
||||
logging.addLevelName(Log.EXCEPTION, 'EXCEPTION')
|
||||
if not log_name.endswith('.log'):
|
||||
log_path = log_path.with_suffix('.log')
|
||||
# overall logger settings
|
||||
self.logger = logging.getLogger(log_name)
|
||||
# set maximum logging level for all logging output
|
||||
self.logger.setLevel(logging.DEBUG)
|
||||
msg: object, *args: object, extra: MutableMapping[str, object] | None = None,
|
||||
log_error: bool = True
|
||||
) -> None:
|
||||
"""
|
||||
log on exception level; this is log.exception, but logs with a new level
|
||||
|
||||
# self.handlers = []
|
||||
# console logger
|
||||
self.__console_handler(log_level_console)
|
||||
# file logger
|
||||
self.__file_handler(log_level_file, log_path)
|
||||
# if requested, set a start log
|
||||
if add_start_info is True:
|
||||
self.break_line('START')
|
||||
|
||||
def __filter_exceptions(self, record: logging.LogRecord) -> bool:
|
||||
return record.levelname != "EXCEPTION"
|
||||
|
||||
def __console_handler(self, log_level_console: str = 'WARNING'):
|
||||
# console logger
|
||||
if not isinstance(getattr(logging, log_level_console.upper(), None), int):
|
||||
log_level_console = 'WARNING'
|
||||
console_handler = logging.StreamHandler()
|
||||
formatter_console = logging.Formatter(
|
||||
(
|
||||
'[%(asctime)s.%(msecs)03d] '
|
||||
'[%(filename)s:%(funcName)s:%(lineno)d] '
|
||||
'<%(levelname)s> '
|
||||
'%(message)s'
|
||||
),
|
||||
datefmt="%Y-%m-%d %H:%M:%S",
|
||||
)
|
||||
console_handler.setLevel(log_level_console)
|
||||
# do not show exception logs on console
|
||||
console_handler.addFilter(self.__filter_exceptions)
|
||||
console_handler.setFormatter(formatter_console)
|
||||
self.logger.addHandler(console_handler)
|
||||
|
||||
def __file_handler(self, log_level_file: str, log_path: Path) -> None:
|
||||
# file logger
|
||||
if not isinstance(getattr(logging, log_level_file.upper(), None), int):
|
||||
log_level_file = 'DEBUG'
|
||||
file_handler = logging.handlers.TimedRotatingFileHandler(
|
||||
filename=log_path,
|
||||
encoding="utf-8",
|
||||
when="D",
|
||||
interval=1
|
||||
)
|
||||
formatter_file_handler = logging.Formatter(
|
||||
(
|
||||
'[%(asctime)s.%(msecs)03d] '
|
||||
'[%(name)s:%(process)d] '
|
||||
'[%(pathname)s:%(funcName)s:%(lineno)d] '
|
||||
'<%(levelname)s> '
|
||||
'%(message)s'
|
||||
),
|
||||
datefmt="%Y-%m-%dT%H:%M:%S",
|
||||
)
|
||||
file_handler.setLevel(log_level_file)
|
||||
file_handler.setFormatter(formatter_file_handler)
|
||||
self.logger.addHandler(file_handler)
|
||||
Args:
|
||||
msg (object): _description_
|
||||
*args (object): arguments for msg
|
||||
extra: Mapping[str, object] | None: extra arguments for the formatting if needed
|
||||
log_error: (bool): If set to false will not write additional error message for console (Default True)
|
||||
"""
|
||||
if not hasattr(self, 'logger'):
|
||||
raise ValueError('Logger is not yet initialized')
|
||||
if extra is None:
|
||||
extra = {}
|
||||
extra['stack_trace'] = call_stack(skip_last=2)
|
||||
extra['exception_trace'] = exception_stack()
|
||||
# write to console first with extra flag for filtering in file
|
||||
if log_error:
|
||||
self.logger.log(
|
||||
LoggingLevel.ERROR.value,
|
||||
f"<=EXCEPTION={extra['exception_trace']}> {msg} [{extra['stack_trace']}]",
|
||||
*args, extra=dict(extra) | {'console': True}, stacklevel=2
|
||||
)
|
||||
self.logger.log(LoggingLevel.EXCEPTION.value, msg, *args, exc_info=True, extra=extra, stacklevel=2)
|
||||
|
||||
def break_line(self, info: str = "BREAK"):
|
||||
"""
|
||||
@@ -94,29 +279,561 @@ class Log:
|
||||
Keyword Arguments:
|
||||
info {str} -- _description_ (default: {"BREAK"})
|
||||
"""
|
||||
self.logger.info("[%s] ================================>", info)
|
||||
if not hasattr(self, 'logger'):
|
||||
raise ValueError('Logger is not yet initialized')
|
||||
self.logger.info("[%s] %s>", info, self.SPACER_CHAR * self.SPACER_LENGTH)
|
||||
|
||||
def exception(self, msg: object, *args: object, extra: Mapping[str, object] | None = None) -> None:
|
||||
# MARK: queue handling
|
||||
def flush(self, handler_name: str | None = None, timeout: float = 2.0) -> bool:
|
||||
"""
|
||||
log on exception level
|
||||
Flush all pending messages
|
||||
|
||||
Keyword Arguments:
|
||||
handler_name {str | None} -- _description_ (default: {None})
|
||||
timeout {float} -- _description_ (default: {2.0})
|
||||
|
||||
Returns:
|
||||
bool -- _description_
|
||||
"""
|
||||
if not self.log_queue:
|
||||
return False
|
||||
|
||||
try:
|
||||
# Wait for queue to be processed
|
||||
start_time = time.time()
|
||||
while not self.log_queue.empty() and (time.time() - start_time) < timeout:
|
||||
time.sleep(0.01)
|
||||
|
||||
# Flush all handlers or handler given
|
||||
if handler_name:
|
||||
try:
|
||||
self.handlers[handler_name].flush()
|
||||
except KeyError:
|
||||
pass
|
||||
else:
|
||||
for handler in self.handlers.values():
|
||||
handler.flush()
|
||||
except OSError:
|
||||
return False
|
||||
return True
|
||||
|
||||
def cleanup(self):
|
||||
"""
|
||||
cleanup for any open queues in case we have an abort
|
||||
"""
|
||||
if not self.log_queue:
|
||||
return
|
||||
self.flush()
|
||||
# Close the queue properly
|
||||
self.log_queue.close()
|
||||
self.log_queue.join_thread()
|
||||
|
||||
# MARK: log level handling
|
||||
def set_log_level(self, handler_name: str, log_level: LoggingLevel) -> bool:
|
||||
"""
|
||||
set the logging level for a handler
|
||||
|
||||
Arguments:
|
||||
handler_name {str} -- _description_
|
||||
log_level {LoggingLevel} -- _description_
|
||||
|
||||
Returns:
|
||||
bool -- _description_
|
||||
"""
|
||||
try:
|
||||
# flush queue before changing logging level
|
||||
self.flush(handler_name)
|
||||
self.handlers[handler_name].setLevel(log_level.name)
|
||||
return True
|
||||
except KeyError:
|
||||
if self.logger:
|
||||
self.logger.error('Handler %s not found, cannot change log level', handler_name)
|
||||
return False
|
||||
except AttributeError:
|
||||
if self.logger:
|
||||
self.logger.error(
|
||||
'Cannot change to log level %s for handler %s, log level invalid',
|
||||
log_level.name, handler_name
|
||||
)
|
||||
return False
|
||||
|
||||
def get_log_level(self, handler_name: str) -> LoggingLevel:
|
||||
"""
|
||||
get the logging level for a handler
|
||||
|
||||
Arguments:
|
||||
handler_name {str} -- _description_
|
||||
|
||||
Returns:
|
||||
LoggingLevel -- _description_
|
||||
"""
|
||||
try:
|
||||
return LoggingLevel.from_any(self.handlers[handler_name].level)
|
||||
except KeyError:
|
||||
return LoggingLevel.NOTSET
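# Usage sketch (illustrative), assuming `log` is a configured Log instance and
# 'stream_handler' is one of its handler names:
#   log.set_log_level('stream_handler', LoggingLevel.DEBUG)
#   log.get_log_level('stream_handler')   # -> LoggingLevel.DEBUG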
|
||||
|
||||
@staticmethod
|
||||
def validate_log_level(log_level: Any) -> bool:
|
||||
"""
|
||||
returns False if the log level is invalid, else True
|
||||
|
||||
Args:
|
||||
msg (object): _description_
|
||||
*args (object): arguments for msg
|
||||
extra: Mapping[str, object] | None: extra arguments for the formatting if needed
|
||||
"""
|
||||
self.logger.log(Log.EXCEPTION, msg, *args, exc_info=True, extra=extra)
|
||||
|
||||
def validate_log_level(self, log_level: str) -> bool:
|
||||
"""
|
||||
if the log level is invalid, will return false
|
||||
|
||||
Args:
|
||||
log_level (str): _description_
|
||||
log_level (Any): _description_
|
||||
|
||||
Returns:
|
||||
bool: _description_
|
||||
"""
|
||||
return isinstance(getattr(logging, log_level.upper(), None), int)
|
||||
try:
|
||||
_ = LoggingLevel.from_any(log_level).value
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
@staticmethod
|
||||
def get_log_level_int(log_level: Any) -> int:
|
||||
"""
|
||||
Return log level as INT
|
||||
If invalid returns the default log level
|
||||
|
||||
Arguments:
|
||||
log_level {Any} -- _description_
|
||||
|
||||
Returns:
|
||||
int -- _description_
|
||||
"""
|
||||
try:
|
||||
return LoggingLevel.from_any(log_level).value
|
||||
except ValueError:
|
||||
return LoggingLevel.from_string(Log.DEFAULT_LOG_LEVEL.name).value
|
||||
|
||||
|
||||
# MARK: Logger
|
||||
class Logger(LogParent):
|
||||
"""
|
||||
The class we can pass on to other classes without re-initializing the Log class itself
|
||||
NOTE: if no queue object is handed over, a logging level change might not take immediate effect
|
||||
"""
|
||||
|
||||
def __init__(self, logger_settings: LoggerInit):
|
||||
LogParent.__init__(self)
|
||||
self.logger = logger_settings['logger']
|
||||
self.lg = self.logger
|
||||
self.l = self.logger
|
||||
self.handlers = {str(_handler.name): _handler for _handler in self.logger.handlers}
|
||||
self.log_queue = logger_settings['log_queue']
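# Hand-over sketch (illustrative): build the main Log once, then pass the
# lightweight Logger into sub components instead of the full Log setup:
#   logger = Logger(log.get_logger_settings())   # `log` is an existing Log
#   logger.info("running inside a sub component")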
|
||||
|
||||
|
||||
# MARK: LogSetup class
|
||||
class Log(LogParent):
|
||||
"""
|
||||
logger setup
|
||||
"""
|
||||
|
||||
# spacer lenght characters and the character
|
||||
SPACER_CHAR: str = '='
|
||||
SPACER_LENGTH: int = 32
|
||||
# default logging level
|
||||
DEFAULT_LOG_LEVEL: LoggingLevel = LoggingLevel.WARNING
|
||||
DEFAULT_LOG_LEVEL_FILE: LoggingLevel = LoggingLevel.DEBUG
|
||||
DEFAULT_LOG_LEVEL_CONSOLE: LoggingLevel = LoggingLevel.WARNING
|
||||
# default settings
|
||||
DEFAULT_LOG_SETTINGS: LogSettings = {
|
||||
"log_level_console": DEFAULT_LOG_LEVEL_CONSOLE,
|
||||
"log_level_file": DEFAULT_LOG_LEVEL_FILE,
|
||||
"per_run_log": False,
|
||||
"console_enabled": True,
|
||||
"console_color_output_enabled": True,
|
||||
# which parts to print: time, log name, file, function, line number
|
||||
"console_format_type": ConsoleFormatSettings.ALL,
|
||||
"add_start_info": True,
|
||||
"add_end_info": False,
|
||||
"log_queue": None,
|
||||
}
|
||||
|
||||
# MARK: constructor
|
||||
def __init__(
|
||||
self,
|
||||
log_path: Path,
|
||||
log_name: str,
|
||||
log_settings: (
|
||||
dict[str, 'LoggingLevel | str | bool | None | Queue[str] | ConsoleFormat'] | # noqa: E501 # pylint: disable=line-too-long
|
||||
LogSettings | None
|
||||
) = None,
|
||||
other_handlers: dict[str, Any] | None = None
|
||||
):
|
||||
LogParent.__init__(self)
|
||||
# add new levels for alert, emergency and exception
|
||||
logging.addLevelName(LoggingLevel.ALERT.value, LoggingLevel.ALERT.name)
|
||||
logging.addLevelName(LoggingLevel.EMERGENCY.value, LoggingLevel.EMERGENCY.name)
|
||||
logging.addLevelName(LoggingLevel.EXCEPTION.value, LoggingLevel.EXCEPTION.name)
|
||||
# parse the logging settings
|
||||
self.log_settings = self.__parse_log_settings(log_settings)
|
||||
# if path, set log name with .log
|
||||
# if log name with .log, strip .log for naming
|
||||
if log_path.is_dir():
|
||||
__log_file_name = re.sub(r'[^a-zA-Z0-9]', '', log_name)
|
||||
if not log_name.endswith('.log'):
|
||||
log_path = log_path.joinpath(Path(__log_file_name).with_suffix('.log'))
|
||||
else:
|
||||
log_path = log_path.joinpath(__log_file_name)
|
||||
elif not log_path.suffix == '.log':
|
||||
# add .log if the path is a file but without .log
|
||||
log_path = log_path.with_suffix('.log')
|
||||
# strip .log from the log name if set
|
||||
if not log_name.endswith('.log'):
|
||||
log_name = Path(log_name).stem
|
||||
# general log name
|
||||
self.log_name = log_name
|
||||
|
||||
self.log_queue: 'Queue[str] | None' = None
|
||||
self.listener: logging.handlers.QueueListener | None = None
|
||||
self.logger: logging.Logger
|
||||
|
||||
# setup handlers
|
||||
# NOTE: if the console handler with color is set first, some of the color formatting is applied
|
||||
# in the file writer too, for handlers where the color is set BEFORE the format
|
||||
# Any is logging.StreamHandler, logging.FileHandler and all logging.handlers.*
|
||||
self.handlers: dict[str, Any] = {}
|
||||
self.add_handler('file_handler', self.__create_file_handler(
|
||||
'file_handler', self.log_settings['log_level_file'], log_path)
|
||||
)
|
||||
if self.log_settings['console_enabled']:
|
||||
# console
|
||||
self.add_handler('stream_handler', self.__create_console_handler(
|
||||
'stream_handler',
|
||||
self.log_settings['log_level_console'],
|
||||
console_format_type=self.log_settings['console_format_type'],
|
||||
))
|
||||
# add other handlers,
|
||||
if other_handlers is not None:
|
||||
for handler_key, handler in other_handlers.items():
|
||||
self.add_handler(handler_key, handler)
|
||||
# init listener if we have a log_queue set
|
||||
self.__init_listener(self.log_settings['log_queue'])
|
||||
|
||||
# overall logger start
|
||||
self.__init_log(log_name)
|
||||
# if requested, set a start log
|
||||
if self.log_settings['add_start_info'] is True:
|
||||
self.break_line('START')
|
||||
|
||||
# MARK: deconstructor
|
||||
def __del__(self):
|
||||
"""
|
||||
Called when the class is destroyed; make sure the listener is closed or else we throw a thread error
|
||||
"""
|
||||
if hasattr(self, 'log_settings') and self.log_settings.get('add_end_info'):
|
||||
self.break_line('END')
|
||||
self.stop_listener()
|
||||
|
||||
# MARK: parse log settings
|
||||
def __parse_log_settings(
|
||||
self,
|
||||
log_settings: dict[str, 'LoggingLevel | str | bool | None | Queue[str] | ConsoleFormat'] | # noqa: E501 # pylint: disable=line-too-long
|
||||
LogSettings | None
|
||||
) -> LogSettings:
|
||||
# skip with default if not set
|
||||
if log_settings is None:
|
||||
return self.DEFAULT_LOG_SETTINGS
|
||||
# check entries
|
||||
default_log_settings = self.DEFAULT_LOG_SETTINGS
|
||||
# check log levels
|
||||
for __log_entry in ['log_level_console', 'log_level_file']:
|
||||
if log_settings.get(__log_entry) is None:
|
||||
continue
|
||||
# if not valid reset to default, if not in default set to WARNING
|
||||
if not self.validate_log_level(__log_level := log_settings.get(__log_entry, '')):
|
||||
__log_level = self.DEFAULT_LOG_SETTINGS.get(
|
||||
__log_entry, self.DEFAULT_LOG_LEVEL
|
||||
)
|
||||
default_log_settings[__log_entry] = LoggingLevel.from_any(__log_level)
|
||||
# check bool
|
||||
for __log_entry in [
|
||||
"per_run_log",
|
||||
"console_enabled",
|
||||
"console_color_output_enabled",
|
||||
"add_start_info",
|
||||
"add_end_info",
|
||||
]:
|
||||
if log_settings.get(__log_entry) is None:
|
||||
continue
|
||||
if not isinstance(__setting := log_settings.get(__log_entry, ''), bool):
|
||||
__setting = self.DEFAULT_LOG_SETTINGS.get(__log_entry, True)
|
||||
default_log_settings[__log_entry] = __setting
|
||||
# check console log type
|
||||
default_log_settings['console_format_type'] = cast('ConsoleFormat', log_settings.get(
|
||||
'console_format_type', self.DEFAULT_LOG_SETTINGS['console_format_type']
|
||||
))
|
||||
# check log queue
|
||||
__setting = log_settings.get('log_queue', self.DEFAULT_LOG_SETTINGS['log_queue'])
|
||||
if __setting is not None:
|
||||
__setting = cast('Queue[str]', __setting)
|
||||
default_log_settings['log_queue'] = __setting
|
||||
return default_log_settings
|
||||
|
||||
# def __filter_exceptions(self, record: logging.LogRecord) -> bool:
|
||||
# return record.levelname != "EXCEPTION"
|
||||
|
||||
# MARK: add a handler
|
||||
def add_handler(
|
||||
self,
|
||||
handler_name: str,
|
||||
handler: Any
|
||||
) -> bool:
|
||||
"""
|
||||
Add a log handler to the handlers dict
|
||||
|
||||
Arguments:
|
||||
handler_name {str} -- _description_
|
||||
handler {Any} -- _description_
|
||||
"""
|
||||
if self.handlers.get(handler_name):
|
||||
return False
|
||||
if self.listener is not None or hasattr(self, 'logger'):
|
||||
raise ValueError(
|
||||
f"Cannot add handler {handler_name}: {handler.get_name()} because logger is already running"
|
||||
)
|
||||
# TODO: handler must be some handler type, how to check?
|
||||
self.handlers[handler_name] = handler
|
||||
return True
|
||||
|
||||
# MARK: console handler
|
||||
def __create_console_handler(
|
||||
self, handler_name: str,
|
||||
log_level_console: LoggingLevel = LoggingLevel.WARNING,
|
||||
filter_exceptions: bool = True,
|
||||
console_format_type: ConsoleFormat = ConsoleFormatSettings.ALL,
|
||||
) -> logging.StreamHandler[TextIO]:
|
||||
# console logger
|
||||
if not self.validate_log_level(log_level_console):
|
||||
log_level_console = self.DEFAULT_LOG_LEVEL_CONSOLE
|
||||
console_handler = logging.StreamHandler()
|
||||
print(f"Console format type: {console_format_type}")
|
||||
# build the format string based on what flags are set
|
||||
format_string = ''
|
||||
# time part if any of the times are requested
|
||||
if (
|
||||
ConsoleFormat.TIME in console_format_type or
|
||||
ConsoleFormat.TIME_SECONDS in console_format_type or
|
||||
ConsoleFormat.TIME_MILLISECONDS in console_format_type or
|
||||
ConsoleFormat.TIME_MICROSECONDS in console_format_type
|
||||
):
|
||||
format_string += '[%(asctime)s] '
|
||||
# set log name
|
||||
if ConsoleFormat.NAME in console_format_type:
|
||||
format_string += '[%(name)s] '
|
||||
# for any file/function/line number call
|
||||
if (
|
||||
ConsoleFormat.FILE in console_format_type or
|
||||
ConsoleFormat.FUNCTION in console_format_type or
|
||||
ConsoleFormat.LINENO in console_format_type
|
||||
):
|
||||
format_string += '['
|
||||
set_group: list[str] = []
|
||||
if ConsoleFormat.FILE in console_format_type:
|
||||
set_group.append('%(filename)s')
|
||||
if ConsoleFormat.FUNCTION in console_format_type:
|
||||
set_group.append('%(funcName)s')
|
||||
if ConsoleFormat.LINENO in console_format_type:
|
||||
set_group.append('%(lineno)d')
|
||||
format_string += ':'.join(set_group)
|
||||
format_string += '] '
|
||||
# always level + message
|
||||
format_string += '<%(levelname)s> %(message)s'
|
||||
# basic date, but this will be overridden to ISO in formatTime
|
||||
# format_date = "%Y-%m-%d %H:%M:%S"
|
||||
# color or not
|
||||
if self.log_settings['console_color_output_enabled']:
|
||||
# formatter_console = CustomConsoleFormatter(format_string, datefmt=format_date)
|
||||
formatter_console = CustomConsoleFormatter(format_string)
|
||||
else:
|
||||
# formatter_console = logging.Formatter(format_string, datefmt=format_date)
|
||||
formatter_console = logging.Formatter(format_string)
|
||||
# default for TIME is milliseconds
|
||||
# if multiple are set, the finest precision wins
|
||||
if ConsoleFormat.TIME_MICROSECONDS in console_format_type:
|
||||
iso_precision = 'microseconds'
|
||||
elif (
|
||||
ConsoleFormat.TIME_MILLISECONDS in console_format_type or
|
||||
ConsoleFormat.TIME in console_format_type
|
||||
):
|
||||
iso_precision = 'milliseconds'
|
||||
elif ConsoleFormat.TIME_SECONDS in console_format_type:
|
||||
iso_precision = 'seconds'
|
||||
else:
|
||||
iso_precision = 'milliseconds'
|
||||
# do timestamp modification only if we have time requested
|
||||
if (
|
||||
ConsoleFormat.TIME in console_format_type or
|
||||
ConsoleFormat.TIME_SECONDS in console_format_type or
|
||||
ConsoleFormat.TIME_MILLISECONDS in console_format_type or
|
||||
ConsoleFormat.TIME_MICROSECONDS in console_format_type
|
||||
):
|
||||
# if a timezone is requested, add the astimezone() call
|
||||
if ConsoleFormat.TIMEZONE in console_format_type:
|
||||
formatter_console.formatTime = (
|
||||
lambda record, datefmt=None:
|
||||
datetime
|
||||
.fromtimestamp(record.created)
|
||||
.astimezone()
|
||||
.isoformat(sep=" ", timespec=iso_precision)
|
||||
)
|
||||
else:
|
||||
formatter_console.formatTime = (
|
||||
lambda record, datefmt=None:
|
||||
datetime
|
||||
.fromtimestamp(record.created)
|
||||
.isoformat(sep=" ", timespec=iso_precision)
|
||||
)
|
||||
console_handler.set_name(handler_name)
|
||||
console_handler.setLevel(log_level_console.name)
|
||||
# do not show exception logs on console
|
||||
console_handler.addFilter(CustomHandlerFilter('console', filter_exceptions))
|
||||
console_handler.setFormatter(formatter_console)
|
||||
return console_handler
|
||||
|
||||
# MARK: file handler
|
||||
def __create_file_handler(
|
||||
self, handler_name: str,
|
||||
log_level_file: LoggingLevel, log_path: Path,
|
||||
# for TimedRotating, if per_run_log is off
|
||||
when: str = "D", interval: int = 1, backup_count: int = 0
|
||||
) -> logging.handlers.TimedRotatingFileHandler | logging.FileHandler:
|
||||
# file logger
|
||||
# when: S/M/H/D/W0-W6/midnight
|
||||
# interval: how many, 1D = every day
|
||||
# backup_count: how many old to keep, 0 = all
|
||||
if not self.validate_log_level(log_level_file):
|
||||
log_level_file = self.DEFAULT_LOG_LEVEL_FILE
|
||||
if self.log_settings['per_run_log']:
|
||||
# log path: remove the stem (".log"), then add the datetime and add .log again
|
||||
now = datetime.now()
|
||||
# we take the first three digits of the microseconds part as milliseconds
|
||||
new_stem = f"{log_path.stem}.{now.strftime('%Y-%m-%d_%H-%M-%S')}.{str(now.microsecond)[:3]}"
|
||||
file_handler = logging.FileHandler(
|
||||
filename=log_path.with_name(f"{new_stem}{log_path.suffix}"),
|
||||
encoding="utf-8",
|
||||
)
|
||||
else:
|
||||
file_handler = logging.handlers.TimedRotatingFileHandler(
|
||||
filename=log_path,
|
||||
encoding="utf-8",
|
||||
when=when,
|
||||
interval=interval,
|
||||
backupCount=backup_count
|
||||
)
|
||||
formatter_file_handler = logging.Formatter(
|
||||
(
|
||||
# time stamp
|
||||
# '[%(asctime)s.%(msecs)03d] '
|
||||
'[%(asctime)s] '
|
||||
# log name
|
||||
'[%(name)s] '
|
||||
# filename + pid
|
||||
# '[%(filename)s:%(process)d] '
|
||||
# pid + path/filename + func + line number
|
||||
'[%(process)d:%(pathname)s:%(funcName)s:%(lineno)d] '
|
||||
# error level
|
||||
'<%(levelname)s> '
|
||||
# message
|
||||
'%(message)s'
|
||||
),
|
||||
datefmt="%Y-%m-%dT%H:%M:%S",
|
||||
)
|
||||
formatter_file_handler.formatTime = (
|
||||
lambda record, datefmt=None:
|
||||
datetime
|
||||
.fromtimestamp(record.created)
|
||||
.astimezone()
|
||||
.isoformat(sep="T", timespec="microseconds")
|
||||
)
|
||||
file_handler.set_name(handler_name)
|
||||
file_handler.setLevel(log_level_file.name)
|
||||
# do not show errors flagged with console (they are from exceptions)
|
||||
file_handler.addFilter(CustomHandlerFilter('file'))
|
||||
file_handler.setFormatter(formatter_file_handler)
|
||||
return file_handler
|
||||
|
||||
# MARK: init listener
|
||||
def __init_listener(self, log_queue: 'Queue[str] | None' = None):
|
||||
"""
|
||||
If we have a Queue option, start the logging queue listener
|
||||
|
||||
Keyword Arguments:
|
||||
log_queue {Queue[str] | None} -- _description_ (default: {None})
|
||||
"""
|
||||
if log_queue is None:
|
||||
return
|
||||
self.log_queue = log_queue
|
||||
atexit.register(self.stop_listener)
|
||||
self.listener = logging.handlers.QueueListener(
|
||||
self.log_queue,
|
||||
*self.handlers.values(),
|
||||
respect_handler_level=True
|
||||
)
|
||||
self.listener.start()
|
||||
|
||||
def stop_listener(self):
|
||||
"""
|
||||
stop the listener
|
||||
"""
|
||||
if self.listener is not None:
|
||||
self.flush()
|
||||
self.listener.stop()
|
||||
|
||||
# MARK: init main log
|
||||
def __init_log(self, log_name: str) -> None:
|
||||
"""
|
||||
Initialize the main logger
|
||||
"""
|
||||
queue_handler: logging.handlers.QueueHandler | None = None
|
||||
if self.log_queue is not None:
|
||||
queue_handler = logging.handlers.QueueHandler(self.log_queue)
|
||||
# overall logger settings
|
||||
self.logger = logging.getLogger(log_name)
|
||||
# add all the handlers
|
||||
if queue_handler is None:
|
||||
for handler in self.handlers.values():
|
||||
self.logger.addHandler(handler)
|
||||
else:
|
||||
self.logger.addHandler(queue_handler)
|
||||
# set maximum logging level for all logging output
|
||||
# log level filtering is done per handler
|
||||
self.logger.setLevel(logging.DEBUG)
|
||||
# short name
|
||||
self.lg = self.logger
|
||||
self.l = self.logger
|
||||
|
||||
# MARK: init logger for Fork/Thread
|
||||
@staticmethod
|
||||
def init_worker_logging(log_queue: 'Queue[str]') -> logging.Logger:
|
||||
"""
|
||||
This initializes a logger that can be used in pool/thread queue calls
|
||||
call in worker initializer as "Log.init_worker_logging(log_queue)"
|
||||
"""
|
||||
queue_handler = logging.handlers.QueueHandler(log_queue)
|
||||
# getLogger call MUST be WITHOUT a logger name
|
||||
root_logger = logging.getLogger()
|
||||
# base logging level, filtering is done in the handlers
|
||||
root_logger.setLevel(logging.DEBUG)
|
||||
root_logger.handlers.clear()
|
||||
root_logger.addHandler(queue_handler)
|
||||
|
||||
# for debug only
|
||||
root_logger.debug('[LOGGER] Init log: %s - %s', log_queue, root_logger.handlers)
|
||||
|
||||
return root_logger
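# Worker-pool sketch (illustrative), assuming the same multiprocessing.Queue
# was handed to Log via log_settings['log_queue'] so its listener is running;
# worker_function and work_items are hypothetical:
#   from multiprocessing import Pool, Queue
#   log_queue: 'Queue[str]' = Queue()
#   with Pool(initializer=Log.init_worker_logging, initargs=(log_queue,)) as pool:
#       pool.map(worker_function, work_items)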
|
||||
|
||||
def get_logger_settings(self) -> LoggerInit:
|
||||
"""
|
||||
get the logger settings we need to init the Logger class
|
||||
|
||||
Returns:
|
||||
LoggerInit -- _description_
|
||||
"""
|
||||
return {
|
||||
"logger": self.logger,
|
||||
"log_queue": self.log_queue
|
||||
}
|
||||
|
||||
# __END__
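# Construction sketch (illustrative, not part of the original diff): any subset
# of LogSettings keys may be passed; missing or invalid entries fall back to
# DEFAULT_LOG_SETTINGS. The path and name below are hypothetical.
from pathlib import Path

log = Log(
    log_path=Path("log"),
    log_name="my_app",
    log_settings={"log_level_console": "DEBUG", "per_run_log": True},
)
log.info("setup done")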
|
||||
|
||||
@@ -0,0 +1,89 @@
|
||||
"""
|
||||
All logging levels
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Any
|
||||
from enum import Enum
|
||||
|
||||
|
||||
class LoggingLevel(Enum):
|
||||
"""
|
||||
Log class levels
|
||||
"""
|
||||
NOTSET = logging.NOTSET # 0
|
||||
DEBUG = logging.DEBUG # 10
|
||||
INFO = logging.INFO # 20
|
||||
WARNING = logging.WARNING # 30
|
||||
ERROR = logging.ERROR # 40
|
||||
CRITICAL = logging.CRITICAL # 50
|
||||
ALERT = 55 # 55 (for Sys log)
|
||||
EMERGENCY = 60 # 60 (for Sys log)
|
||||
EXCEPTION = 70 # 70 (manually set, error but with higher level)
|
||||
# Alternative names
|
||||
WARN = logging.WARN # 30 (alias for WARNING)
|
||||
FATAL = logging.FATAL # 50 (alias for CRITICAL)
|
||||
|
||||
@classmethod
|
||||
def from_string(cls, level_str: str):
|
||||
"""Convert string to LogLevel enum"""
|
||||
try:
|
||||
return cls[level_str.upper()]
|
||||
except KeyError as e:
|
||||
raise ValueError(f"Invalid log level: {level_str}") from e
|
||||
except AttributeError as e:
|
||||
raise ValueError(f"Invalid log level: {level_str}") from e
|
||||
|
||||
@classmethod
|
||||
def from_int(cls, level_int: int):
|
||||
"""Convert integer to LogLevel enum"""
|
||||
try:
|
||||
return cls(level_int)
|
||||
except ValueError as e:
|
||||
raise ValueError(f"Invalid log level: {level_int}") from e
|
||||
|
||||
@classmethod
|
||||
def from_any(cls, level_any: Any):
|
||||
"""
|
||||
Convert any value
|
||||
if already a LoggingLevel, return as is; else try to convert from int or string
|
||||
|
||||
Arguments:
|
||||
level_any {Any} -- _description_
|
||||
|
||||
Returns:
|
||||
_type_ -- _description_
|
||||
"""
|
||||
if isinstance(level_any, LoggingLevel):
|
||||
return level_any
|
||||
if isinstance(level_any, int):
|
||||
return cls.from_int(level_any)
|
||||
return cls.from_string(level_any)
|
||||
|
||||
def to_logging_level(self):
|
||||
"""Convert to logging module level"""
|
||||
return self.value
|
||||
|
||||
def to_lower_case(self):
|
||||
"""return loser case"""
|
||||
return self.name.lower()
|
||||
|
||||
def __str__(self):
|
||||
return self.name
|
||||
|
||||
def includes(self, level: 'LoggingLevel'):
|
||||
"""
|
||||
if given level is included in set level
|
||||
eg: INFO set, ERROR is included in INFO because INFO level would print ERROR
|
||||
"""
|
||||
return self.value <= level.value
|
||||
|
||||
def is_higher_than(self, level: 'LoggingLevel'):
|
||||
"""if given value is higher than set"""
|
||||
return self.value > level.value
|
||||
|
||||
def is_lower_than(self, level: 'LoggingLevel'):
|
||||
"""if given value is lower than set"""
|
||||
return self.value < level.value
|
||||
|
||||
# __END__
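# Conversion sketch (illustrative): from_any accepts an existing LoggingLevel,
# an int, or a level name string and normalizes all three.
assert LoggingLevel.from_any("debug") is LoggingLevel.DEBUG
assert LoggingLevel.from_any(40) is LoggingLevel.ERROR
assert LoggingLevel.INFO.includes(LoggingLevel.ERROR)  # INFO would print ERROR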
|
||||
20
src/corelibs/requests_handling/auth_helpers.py
Normal file
@@ -0,0 +1,20 @@
|
||||
"""
|
||||
Various HTTP auth helpers
|
||||
"""
|
||||
|
||||
from base64 import b64encode
|
||||
|
||||
|
||||
def basic_auth(username: str, password: str) -> str:
|
||||
"""
|
||||
setup basic auth, for debug
|
||||
|
||||
Arguments:
|
||||
username {str} -- _description_
|
||||
password {str} -- _description_
|
||||
|
||||
Returns:
|
||||
str -- _description_
|
||||
"""
|
||||
token = b64encode(f"{username}:{password}".encode('utf-8')).decode("ascii")
|
||||
return f'Basic {token}'
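# Usage sketch (illustrative): build the Authorization header for an HTTP call:
#   headers = {"Authorization": basic_auth("alice", "s3cret")}
#   # -> {'Authorization': 'Basic YWxpY2U6czNjcmV0'}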
|
||||
@@ -18,11 +18,12 @@ class Caller:
|
||||
header: dict[str, str],
|
||||
verify: bool = True,
|
||||
timeout: int = 20,
|
||||
proxy: dict[str, str] | None = None
|
||||
proxy: dict[str, str] | None = None,
|
||||
ca_file: str | None = None
|
||||
):
|
||||
self.headers = header
|
||||
self.timeout: int = timeout
|
||||
self.cafile = "/Library/Application Support/Netskope/STAgent/data/nscacert.pem"
|
||||
self.cafile = ca_file
|
||||
self.verify = verify
|
||||
self.proxy = proxy
|
||||
|
||||
|
||||
@@ -32,7 +32,7 @@ show_position(file pos optional)
|
||||
import time
|
||||
from typing import Literal
|
||||
from math import floor
|
||||
from corelibs.string_handling.datetime_helpers import convert_timestamp
|
||||
from corelibs.datetime_handling.timestamp_convert import convert_timestamp
|
||||
from corelibs.string_handling.byte_helpers import format_bytes
|
||||
|
||||
|
||||
@@ -1,63 +0,0 @@
|
||||
"""
|
||||
Various string based date/time helpers
|
||||
"""
|
||||
|
||||
from math import floor
|
||||
import time
|
||||
|
||||
|
||||
def convert_timestamp(timestamp: float | int, show_micro: bool = True) -> str:
|
||||
"""
|
||||
format timestamp into human readable format
|
||||
|
||||
Arguments:
|
||||
timestamp {float} -- _description_
|
||||
|
||||
Keyword Arguments:
|
||||
show_micro {bool} -- _description_ (default: {True})
|
||||
|
||||
Returns:
|
||||
str -- _description_
|
||||
"""
|
||||
# cut off the ms, but first round them to four digits
|
||||
__timestamp_ms_split = str(round(timestamp, 4)).split(".")
|
||||
timestamp = int(__timestamp_ms_split[0])
|
||||
try:
|
||||
ms = int(__timestamp_ms_split[1])
|
||||
except IndexError:
|
||||
ms = 0
|
||||
timegroups = (86400, 3600, 60, 1)
|
||||
output: list[int] = []
|
||||
for i in timegroups:
|
||||
output.append(int(floor(timestamp / i)))
|
||||
timestamp = timestamp % i
|
||||
# output has days|hours|min|sec ms
|
||||
time_string = ""
|
||||
if output[0]:
|
||||
time_string = f"{output[0]}d"
|
||||
if output[0] or output[1]:
|
||||
time_string += f"{output[1]}h "
|
||||
if output[0] or output[1] or output[2]:
|
||||
time_string += f"{output[2]}m "
|
||||
time_string += f"{output[3]}s"
|
||||
if show_micro:
|
||||
time_string += f" {ms}ms" if ms else " 0ms"
|
||||
return time_string
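# Usage sketch (illustrative); per the import change above, this helper now
# lives in corelibs.datetime_handling.timestamp_convert:
#   convert_timestamp(90061.25)              # -> "1d1h 1m 1s 25ms"
#   convert_timestamp(59, show_micro=False)  # -> "59s"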
|
||||
|
||||
|
||||
def create_time(timestamp: float, timestamp_format: str = "%Y-%m-%d %H:%M:%S") -> str:
|
||||
"""
|
||||
just takes a timestamp and prints out a human readable format
|
||||
|
||||
Arguments:
|
||||
timestamp {float} -- _description_
|
||||
|
||||
Keyword Arguments:
|
||||
timestamp_format {_type_} -- _description_ (default: {"%Y-%m-%d %H:%M:%S"})
|
||||
|
||||
Returns:
|
||||
str -- _description_
|
||||
"""
|
||||
return time.strftime(timestamp_format, time.localtime(timestamp))
|
||||
|
||||
# __END__
|
||||
@@ -2,16 +2,20 @@
|
||||
String helpers
|
||||
"""
|
||||
|
||||
import re
|
||||
from decimal import Decimal, getcontext
|
||||
from textwrap import shorten
|
||||
|
||||
|
||||
def shorten_string(string: str, length: int, hard_shorten: bool = False, placeholder: str = " [~]") -> str:
|
||||
def shorten_string(
|
||||
string: str | int | float, length: int, hard_shorten: bool = False, placeholder: str = " [~]"
|
||||
) -> str:
|
||||
"""
|
||||
check if entry is too long and cut it, but only for console output
|
||||
Note that if there are no spaces in the string, it will automatically use the hard split mode
|
||||
|
||||
Args:
|
||||
string (str): _description_
|
||||
string (str | int | float): _description_
|
||||
length (int): _description_
|
||||
hard_shorten (bool): if shortening should be done on fixed string length. Default: False
|
||||
placeholder (str): placeholder string. Default: " [~]"
|
||||
@@ -19,13 +23,19 @@ def shorten_string(string: str, length: int, hard_shorten: bool = False, placeho
|
||||
Returns:
|
||||
str: _description_
|
||||
"""
|
||||
length = int(length)
|
||||
string = str(string)
|
||||
# if the string is longer than the allowed length
|
||||
if len(string) > length:
|
||||
if hard_shorten is True or " " not in string:
|
||||
# hard shorten error
|
||||
if len(placeholder) > length:
|
||||
raise ValueError(f"Cannot shorten string: placeholder {placeholder} is too large for max width")
|
||||
short_string = f"{string[:(length - len(placeholder))]}{placeholder}"
|
||||
else:
|
||||
short_string = shorten(string, width=length, placeholder=placeholder)
|
||||
try:
|
||||
short_string = shorten(string, width=length, placeholder=placeholder)
|
||||
except ValueError as e:
|
||||
raise ValueError(f"Cannot shorten string: {e}") from e
|
||||
else:
|
||||
short_string = string
|
||||
|
||||
@@ -66,6 +76,9 @@ def format_number(number: float, precision: int = 0) -> str:
|
||||
format numbers; currently trailing zeros do not work
|
||||
use {:,} or {:,.<N>f} (<N> = number of decimal places) instead of this
|
||||
|
||||
The upper limit of the precision depends on the value of the number itself
|
||||
very large numbers will have no fractional precision at all anymore
|
||||
|
||||
Arguments:
|
||||
number {float} -- _description_
|
||||
|
||||
@@ -75,12 +88,35 @@ def format_number(number: float, precision: int = 0) -> str:
|
||||
Returns:
|
||||
str -- _description_
|
||||
"""
|
||||
if precision < 0 and precision > 100:
|
||||
if precision < 0 or precision > 100:
|
||||
precision = 0
|
||||
if precision > 0:
|
||||
getcontext().prec = precision
|
||||
# make it a string to avoid mangling
|
||||
_number = Decimal(str(number))
|
||||
else:
|
||||
_number = number
|
||||
return (
|
||||
"{:,."
|
||||
f"{str(precision)}"
|
||||
"f}"
|
||||
).format(number)
|
||||
).format(_number)
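# Usage sketch (illustrative): thousands separators with a fixed precision:
#   format_number(1234567.891, 2)  # -> "1,234,567.89"
#   format_number(1234567.891)     # -> "1,234,568" (precision 0 rounds)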
|
||||
|
||||
|
||||
def prepare_url_slash(url: str) -> str:
|
||||
"""
|
||||
if the URL does not start with /, add slash
|
||||
strip all double slashes in URL
|
||||
|
||||
Arguments:
|
||||
url {str} -- _description_
|
||||
|
||||
Returns:
|
||||
str -- _description_
|
||||
"""
|
||||
url = re.sub(r'\/+', '/', url)
|
||||
if not url.startswith("/"):
|
||||
url = "/" + url
|
||||
return url
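# Usage sketch (illustrative): duplicate slashes collapse, leading slash added:
#   prepare_url_slash("api//v1///users")  # -> "/api/v1/users"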
|
||||
|
||||
# __END__
|
||||
|
||||
156
src/corelibs/string_handling/text_colors.py
Normal file
@@ -0,0 +1,156 @@
|
||||
"""
|
||||
Basic ANSI colors
|
||||
|
||||
Set colors with print(f"something {Colors.yellow}colorful{Colors.end}")
|
||||
bold + underline + color combinations are possible.
|
||||
"""
|
||||
|
||||
|
||||
class Colors:
|
||||
"""
|
||||
ANSI colors defined
|
||||
"""
|
||||
# General sets, these should not be accessed directly
|
||||
__BOLD = '\033[1m'
|
||||
__UNDERLINE = '\033[4m'
|
||||
__END = '\033[0m'
|
||||
__RESET = '\033[0m'
|
||||
# Define ANSI color codes as class attributes
|
||||
__BLACK = "\033[30m"
|
||||
__RED = "\033[31m"
|
||||
__GREEN = "\033[32m"
|
||||
__YELLOW = "\033[33m"
|
||||
__BLUE = "\033[34m"
|
||||
__MAGENTA = "\033[35m"
|
||||
__CYAN = "\033[36m"
|
||||
__WHITE = "\033[37m"
|
||||
|
||||
# Define bold/bright versions of the colors
|
||||
__BLACK_BOLD = "\033[1;30m"
|
||||
__RED_BOLD = "\033[1;31m"
|
||||
__GREEN_BOLD = "\033[1;32m"
|
||||
__YELLOW_BOLD = "\033[1;33m"
|
||||
__BLUE_BOLD = "\033[1;34m"
|
||||
__MAGENTA_BOLD = "\033[1;35m"
|
||||
__CYAN_BOLD = "\033[1;36m"
|
||||
__WHITE_BOLD = "\033[1;37m"
|
||||
|
||||
# BRIGHT, alternative
|
||||
__BLACK_BRIGHT = '\033[90m'
|
||||
__RED_BRIGHT = '\033[91m'
|
||||
__GREEN_BRIGHT = '\033[92m'
|
||||
__YELLOW_BRIGHT = '\033[93m'
|
||||
__BLUE_BRIGHT = '\033[94m'
|
||||
__MAGENTA_BRIGHT = '\033[95m'
|
||||
__CYAN_BRIGHT = '\033[96m'
|
||||
__WHITE_BRIGHT = '\033[97m'
|
||||
|
||||
# set access vars
|
||||
bold = __BOLD
|
||||
underline = __UNDERLINE
|
||||
end = __END
|
||||
reset = __RESET
|
||||
# normal
|
||||
black = __BLACK
|
||||
red = __RED
|
||||
green = __GREEN
|
||||
yellow = __YELLOW
|
||||
blue = __BLUE
|
||||
magenta = __MAGENTA
|
||||
cyan = __CYAN
|
||||
white = __WHITE
|
||||
# bold
|
||||
black_bold = __BLACK_BOLD
|
||||
red_bold = __RED_BOLD
|
||||
green_bold = __GREEN_BOLD
|
||||
yellow_bold = __YELLOW_BOLD
|
||||
blue_bold = __BLUE_BOLD
|
||||
magenta_bold = __MAGENTA_BOLD
|
||||
cyan_bold = __CYAN_BOLD
|
||||
white_bold = __WHITE_BOLD
|
||||
# bright
|
||||
black_bright = __BLACK_BRIGHT
|
||||
red_bright = __RED_BRIGHT
|
||||
green_bright = __GREEN_BRIGHT
|
||||
yellow_bright = __YELLOW_BRIGHT
|
||||
blue_bright = __BLUE_BRIGHT
|
||||
magenta_bright = __MAGENTA_BRIGHT
|
||||
cyan_bright = __CYAN_BRIGHT
|
||||
white_bright = __WHITE_BRIGHT
|
||||
|
||||
@staticmethod
|
||||
def disable():
|
||||
"""
|
||||
No colors
|
||||
"""
|
||||
Colors.bold = ''
|
||||
Colors.underline = ''
|
||||
Colors.end = ''
|
||||
Colors.reset = ''
|
||||
# normal
|
||||
Colors.black = ''
|
||||
Colors.red = ''
|
||||
Colors.green = ''
|
||||
Colors.yellow = ''
|
||||
Colors.blue = ''
|
||||
Colors.magenta = ''
|
||||
Colors.cyan = ''
|
||||
Colors.white = ''
|
||||
# bold/bright
|
||||
Colors.black_bold = ''
|
||||
Colors.red_bold = ''
|
||||
Colors.green_bold = ''
|
||||
Colors.yellow_bold = ''
|
||||
Colors.blue_bold = ''
|
||||
Colors.magenta_bold = ''
|
||||
Colors.cyan_bold = ''
|
||||
Colors.white_bold = ''
|
||||
# bold/bright alt
|
||||
Colors.black_bright = ''
|
||||
Colors.red_bright = ''
|
||||
Colors.green_bright = ''
|
||||
Colors.yellow_bright = ''
|
||||
Colors.blue_bright = ''
|
||||
Colors.magenta_bright = ''
|
||||
Colors.cyan_bright = ''
|
||||
Colors.white_bright = ''
|
||||
|
||||
@staticmethod
|
||||
def reset_colors():
|
||||
"""
|
||||
reset colors to the original ones
|
||||
"""
|
||||
# set access vars
|
||||
Colors.bold = Colors.__BOLD
|
||||
Colors.underline = Colors.__UNDERLINE
|
||||
Colors.end = Colors.__END
|
||||
Colors.reset = Colors.__RESET
|
||||
# normal
|
||||
Colors.black = Colors.__BLACK
|
||||
Colors.red = Colors.__RED
|
||||
Colors.green = Colors.__GREEN
|
||||
Colors.yellow = Colors.__YELLOW
|
||||
Colors.blue = Colors.__BLUE
|
||||
Colors.magenta = Colors.__MAGENTA
|
||||
Colors.cyan = Colors.__CYAN
|
||||
Colors.white = Colors.__WHITE
|
||||
# bold
|
||||
Colors.black_bold = Colors.__BLACK_BOLD
|
||||
Colors.red_bold = Colors.__RED_BOLD
|
||||
Colors.green_bold = Colors.__GREEN_BOLD
|
||||
Colors.yellow_bold = Colors.__YELLOW_BOLD
|
||||
Colors.blue_bold = Colors.__BLUE_BOLD
|
||||
Colors.magenta_bold = Colors.__MAGENTA_BOLD
|
||||
Colors.cyan_bold = Colors.__CYAN_BOLD
|
||||
Colors.white_bold = Colors.__WHITE_BOLD
|
||||
# bright
|
||||
Colors.black_bright = Colors.__BLACK_BRIGHT
|
||||
Colors.red_bright = Colors.__RED_BRIGHT
|
||||
Colors.green_bright = Colors.__GREEN_BRIGHT
|
||||
Colors.yellow_bright = Colors.__YELLOW_BRIGHT
|
||||
Colors.blue_bright = Colors.__BLUE_BRIGHT
|
||||
Colors.magenta_bright = Colors.__MAGENTA_BRIGHT
|
||||
Colors.cyan_bright = Colors.__CYAN_BRIGHT
|
||||
Colors.white_bright = Colors.__WHITE_BRIGHT
|
||||
|
||||
# __END__
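# Usage sketch (illustrative): wrap text in color codes, or disable them when
# the output is piped, then restore the original ANSI sequences.
print(f"{Colors.yellow}warning{Colors.reset} done")
Colors.disable()        # all codes become '' for plain output
Colors.reset_colors()   # back to the original ANSI sequences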
|
||||
0
src/corelibs/var_handling/__init__.py
Normal file
75
src/corelibs/var_handling/enum_base.py
Normal file
@@ -0,0 +1,75 @@
|
||||
"""
|
||||
Enum base classes
|
||||
"""
|
||||
|
||||
from enum import Enum
|
||||
from typing import Any
|
||||
|
||||
|
||||
class EnumBase(Enum):
|
||||
"""
|
||||
base for enum
|
||||
lookup_key, lookup_value and from_any return values typed as "EnumBase" rather than the sub class
|
||||
run the return through "from_any" again to get a clean value, or cast it
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def lookup_key(cls, enum_key: str):
|
||||
"""Lookup from key side (must be string)"""
|
||||
# if there is a ":", then this is legacy, replace with ___
|
||||
if ":" in enum_key:
|
||||
enum_key = enum_key.replace(':', '___')
|
||||
try:
|
||||
return cls[enum_key.upper()]
|
||||
except KeyError as e:
|
||||
raise ValueError(f"Invalid key: {enum_key}") from e
|
||||
except AttributeError as e:
|
||||
raise ValueError(f"Invalid key: {enum_key}") from e
|
||||
|
||||
@classmethod
|
||||
def lookup_value(cls, enum_value: Any):
|
||||
"""Lookup through value side"""
|
||||
try:
|
||||
return cls(enum_value)
|
||||
except ValueError as e:
|
||||
raise ValueError(f"Invalid value: {enum_value}") from e
|
||||
|
||||
@classmethod
|
||||
def from_any(cls, enum_any: Any):
|
||||
"""
|
||||
This only works in the following order
|
||||
-> class itself, as is
|
||||
-> str, assume key lookup
|
||||
-> if failed try other
|
||||
|
||||
Arguments:
|
||||
enum_any {Any} -- _description_
|
||||
|
||||
Returns:
|
||||
_type_ -- _description_
|
||||
"""
|
||||
if isinstance(enum_any, cls):
|
||||
return enum_any
|
||||
# try key first if it is string
|
||||
# if failed try value
|
||||
if isinstance(enum_any, str):
|
||||
try:
|
||||
return cls.lookup_key(enum_any)
|
||||
except (ValueError, AttributeError):
|
||||
try:
|
||||
return cls.lookup_value(enum_any)
|
||||
except ValueError as e:
|
||||
raise ValueError(f"Could not find as key or value: {enum_any}") from e
|
||||
return cls.lookup_value(enum_any)
|
||||
|
||||
def to_value(self) -> Any:
|
||||
"""Convert to value"""
|
||||
return self.value
|
||||
|
||||
def to_lower_case(self) -> str:
|
||||
"""return lower case"""
|
||||
return self.name.lower()
|
||||
|
||||
def __str__(self) -> str:
|
||||
"""return [Enum].NAME like it was called with .name"""
|
||||
return self.name
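# Subclass sketch (illustrative, hypothetical enum): strings resolve via key
# lookup first, other values via value lookup, enums pass through unchanged.
class Fruit(EnumBase):
    APPLE = 1
    PEAR = 2

assert Fruit.from_any("pear") is Fruit.PEAR        # string -> key lookup
assert Fruit.from_any(1) is Fruit.APPLE            # int -> value lookup
assert Fruit.from_any(Fruit.APPLE) is Fruit.APPLE  # enum -> returned as-is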
|
||||
65
src/corelibs/var_handling/var_helpers.py
Normal file
@@ -0,0 +1,65 @@
|
||||
"""
|
||||
variable convert, check, etc. helpers
|
||||
"""
|
||||
|
||||
from typing import Any
|
||||
|
||||
|
||||
def is_int(string: Any) -> bool:
|
||||
"""
|
||||
check if a value is int
|
||||
|
||||
Arguments:
|
||||
string {Any} -- _description_
|
||||
|
||||
Returns:
|
||||
bool -- _description_
|
||||
"""
|
||||
try:
|
||||
int(string)
|
||||
return True
|
||||
except TypeError:
|
||||
return False
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
|
||||
def is_float(string: Any) -> bool:
|
||||
"""
|
||||
check if a value is float
|
||||
|
||||
Arguments:
|
||||
string {Any} -- _description_
|
||||
|
||||
Returns:
|
||||
bool -- _description_
|
||||
"""
|
||||
try:
|
||||
float(string)
|
||||
return True
|
||||
except TypeError:
|
||||
return False
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
|
||||
def str_to_bool(string: str):
|
||||
"""
|
||||
convert string to bool
|
||||
|
||||
Arguments:
|
||||
string {str} -- _description_
|
||||
|
||||
Raises:
|
||||
ValueError: _description_
|
||||
|
||||
Returns:
|
||||
_type_ -- _description_
|
||||
"""
|
||||
if string == "True" or string == "true":
|
||||
return True
|
||||
if string == "False" or string == "false":
|
||||
return False
|
||||
raise ValueError(f"Invalid boolean string: {string}")
|
||||
|
||||
# __END__
|
||||
34
test-run/config_handling/config/settings.ini
Normal file
@@ -0,0 +1,34 @@
|
||||
[TestA]
|
||||
foo=bar
|
||||
foobar=1
|
||||
bar=st
|
||||
some_match=foo
|
||||
some_match_list=foo,bar
|
||||
test_list=a,b,c,d f, g h
|
||||
other_list=a|b|c|d|
|
||||
third_list=xy|ab|df|fg
|
||||
str_length=foobar
|
||||
int_range=20
|
||||
int_range_not_set=
|
||||
int_range_not_set_empty_set=5
|
||||
#
|
||||
match_target=foo
|
||||
match_target_list=foo,bar,baz
|
||||
#
|
||||
match_source_a=foo
|
||||
match_source_b=foo
|
||||
; match_source_c=foo
|
||||
match_source_list=foo,bar
|
||||
|
||||
[TestB]
|
||||
element_a=Static energy
|
||||
element_b=123.5
|
||||
element_c=True
|
||||
elemend_d=AB:CD;EF
|
||||
email=foo@bar.com,other+bar-fee@domain-com.cp,
|
||||
email_not_mandatory=
|
||||
email_bad=gii@bar.com
|
||||
|
||||
[LoadTest]
|
||||
a.b.c=foo
|
||||
d:e:f=bar
|
||||
2
test-run/config_handling/log/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
*
|
||||
!.gitignore
|
||||
125
test-run/config_handling/settings_loader.py
Normal file
@@ -0,0 +1,125 @@
|
||||
"""
|
||||
Settings loader test
|
||||
"""
|
||||
|
||||
import re
|
||||
from pathlib import Path
|
||||
from corelibs.debug_handling.dump_data import dump_data
|
||||
from corelibs.logging_handling.log import Log
|
||||
from corelibs.config_handling.settings_loader import SettingsLoader
|
||||
from corelibs.config_handling.settings_loader_handling.settings_loader_check import SettingsLoaderCheck
|
||||
|
||||
SCRIPT_PATH: Path = Path(__file__).resolve().parent
|
||||
ROOT_PATH: Path = SCRIPT_PATH
|
||||
CONFIG_DIR: Path = Path("config")
|
||||
LOG_DIR: Path = Path("log")
|
||||
CONFIG_FILE: str = "settings.ini"
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
Main run
|
||||
"""
|
||||
|
||||
value = "2025/1/1"
|
||||
regex_c = re.compile(SettingsLoaderCheck.CHECK_SETTINGS['string.date']['regex'], re.VERBOSE)
|
||||
result = regex_c.search(value)
|
||||
print(f"regex {regex_c} check against {value} -> {result}")
|
||||
|
||||
# for log testing
|
||||
log = Log(
|
||||
log_path=ROOT_PATH.joinpath(LOG_DIR, 'settings_loader.log'),
|
||||
log_name="Settings Loader",
|
||||
log_settings={
|
||||
"log_level_console": 'DEBUG',
|
||||
"log_level_file": 'DEBUG',
|
||||
}
|
||||
)
|
||||
log.logger.info('Settings loader')
|
||||
|
||||
sl = SettingsLoader(
|
||||
{
|
||||
'foo': 'OVERLOAD'
|
||||
},
|
||||
ROOT_PATH.joinpath(CONFIG_DIR, CONFIG_FILE),
|
||||
log=log
|
||||
)
|
||||
try:
|
||||
config_load = 'TestA'
|
||||
config_data = sl.load_settings(
|
||||
config_load,
|
||||
{
|
||||
# "doesnt": ["split:,"],
|
||||
"foo": ["mandatory:yes"],
|
||||
"foobar": ["check:int"],
|
||||
"bar": ["mandatory:yes"],
|
||||
"some_match": ["matching:foo|bar"],
|
||||
"some_match_list": ["split:,", "matching:foo|bar"],
|
||||
"test_list": [
|
||||
"check:string.alphanumeric",
|
||||
"split:,"
|
||||
],
|
||||
"other_list": ["split:|"],
|
||||
"third_list": [
|
||||
"split:|",
|
||||
"check:string.alphanumeric"
|
||||
],
|
||||
"str_length": [
|
||||
"length:2-10"
|
||||
],
|
||||
"int_range": [
|
||||
"range:2-50"
|
||||
],
|
||||
"int_range_not_set": [
|
||||
"range:2-50"
|
||||
],
|
||||
"int_range_not_set_empty_set": [
|
||||
"empty:"
|
||||
],
|
||||
"match_target": ["matching:foo"],
|
||||
"match_target_list": ["split:,", "matching:foo|bar|baz",],
|
||||
"match_source_a": ["in:match_target"],
|
||||
"match_source_b": ["in:match_target_list"],
|
||||
"match_source_list": ["split:,", "in:match_target_list"],
|
||||
}
|
||||
)
|
||||
print(f"[{config_load}] Load: {config_load} -> {dump_data(config_data)}")
|
||||
except ValueError as e:
|
||||
print(f"Could not load settings: {e}")
|
||||
|
||||
try:
|
||||
config_load = 'TestB'
|
||||
config_data = sl.load_settings(
|
||||
config_load,
|
||||
{
|
||||
"email": [
|
||||
"split:,",
|
||||
"mandatory:yes",
|
||||
"check:string.email.basic"
|
||||
],
|
||||
"email_not_mandatory": [
|
||||
"split:,",
|
||||
# "mandatory:yes",
|
||||
"check:string.email.basic"
|
||||
],
|
||||
"email_bad": [
|
||||
"split:,",
|
||||
"mandatory:yes",
|
||||
"check:string.email.basic"
|
||||
]
|
||||
}
|
||||
)
|
||||
print(f"[{config_load}] Load: {config_load} -> {dump_data(config_data)}")
|
||||
except ValueError as e:
|
||||
print(f"Could not load settings: {e}")
|
||||
|
||||
try:
|
||||
config_load = 'LoadTest'
|
||||
config_data = sl.load_settings(config_load)
|
||||
print(f"[{config_load}] Load: {config_load} -> {dump_data(config_data)}")
|
||||
except ValueError as e:
|
||||
print(f"Could not load settings: {e}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
236
test-run/datetime_handling/datetime_helpers.py
Normal file
@@ -0,0 +1,236 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
"""
|
||||
date string helper test
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from corelibs.datetime_handling.datetime_helpers import (
|
||||
get_datetime_iso8601, get_system_timezone, parse_timezone_data, validate_date,
|
||||
parse_flexible_date, compare_dates, find_newest_datetime_in_list,
|
||||
parse_day_of_week_range, parse_time_range, times_overlap_or_connect, is_time_in_range,
|
||||
reorder_weekdays_from_today
|
||||
)
|
||||
|
||||
|
||||
def __get_datetime_iso8601():
|
||||
"""
|
||||
Comment
|
||||
"""
|
||||
for tz in [
|
||||
'', 'Asia/Tokyo', 'UTC', 'Europe/Vienna',
|
||||
'America/New_York', 'Australia/Sydney',
|
||||
'invalid'
|
||||
]:
|
||||
print(f"{tz} -> {get_datetime_iso8601(tz)}")
|
||||
|
||||
|
||||
def __parse_timezone_data():
|
||||
for tz in [
|
||||
'JST', 'KST', 'UTC', 'CET', 'CEST',
|
||||
]:
|
||||
print(f"{tz} -> {parse_timezone_data(tz)}")
|
||||
|
||||
|
||||
def __validate_date():
|
||||
"""
|
||||
Comment
|
||||
"""
|
||||
|
||||
test_dates = [
|
||||
"2024-01-01",
|
||||
"2024-02-29", # Leap year
|
||||
"2023-02-29", # Invalid date
|
||||
"2024-13-01", # Invalid month
|
||||
"2024-00-10", # Invalid month
|
||||
"2024-04-31", # Invalid day
|
||||
"invalid-date"
|
||||
]
|
||||
|
||||
for date_str in test_dates:
|
||||
is_valid = validate_date(date_str)
|
||||
print(f"Date '{date_str}' is valid: {is_valid}")
|
||||
|
||||
# also test not before and not after
|
||||
not_before_dates = [
|
||||
"2023-12-31",
|
||||
"2024-01-01",
|
||||
"2024-02-29",
|
||||
]
|
||||
not_after_dates = [
|
||||
"2024-12-31",
|
||||
"2024-11-30",
|
||||
"2025-01-01",
|
||||
]
|
||||
|
||||
for date_str in not_before_dates:
|
||||
datetime.strptime(date_str, "%Y-%m-%d") # Ensure valid date format
|
||||
is_valid = validate_date(date_str, not_before=datetime.strptime("2024-01-01", "%Y-%m-%d"))
|
||||
print(f"Date '{date_str}' is valid (not before 2024-01-01): {is_valid}")
|
||||
|
||||
for date_str in not_after_dates:
|
||||
is_valid = validate_date(date_str, not_after=datetime.strptime("2024-12-31", "%Y-%m-%d"))
|
||||
print(f"Date '{date_str}' is valid (not after 2024-12-31): {is_valid}")
|
||||
|
||||
for date_str in test_dates:
|
||||
is_valid = validate_date(
|
||||
date_str,
|
||||
not_before=datetime.strptime("2024-01-01", "%Y-%m-%d"),
|
||||
not_after=datetime.strptime("2024-12-31", "%Y-%m-%d")
|
||||
)
|
||||
print(f"Date '{date_str}' is valid (2024 only): {is_valid}")
|
||||
|
||||
|
||||
def __parse_flexible_date():
|
||||
for date_str in [
|
||||
"2024-01-01",
|
||||
"01/02/2024",
|
||||
"February 29, 2024",
|
||||
"Invalid date",
|
||||
"2025-01-01 12:18:10",
|
||||
"2025-01-01 12:18:10.566",
|
||||
"2025-01-01T12:18:10.566",
|
||||
"2025-01-01T12:18:10.566+02:00",
|
||||
]:
|
||||
print(f"{date_str} -> {parse_flexible_date(date_str)}")
|
||||
|
||||
|
||||
def __compare_dates():
|
||||
|
||||
for date1, date2 in [
|
||||
("2024-01-01 12:00:00", "2024-01-01 15:30:00"),
|
||||
("2024-01-02", "2024-01-01"),
|
||||
("2024-01-01T10:00:00+02:00", "2024-01-01T08:00:00Z"),
|
||||
("invalid-date", "2024-01-01"),
|
||||
("2024-01-01", "invalid-date"),
|
||||
("invalid-date", "also-invalid"),
|
||||
]:
|
||||
result = compare_dates(date1, date2)
|
||||
print(f"Comparing '{date1}' and '{date2}': {result}")
|
||||
|
||||
|
||||
def __find_newest_datetime_in_list():
|
||||
date_list = [
|
||||
"2024-01-01 12:00:00",
|
||||
"2024-01-02 09:30:00",
|
||||
"2023-12-31 23:59:59",
|
||||
"2024-01-02 15:45:00",
|
||||
"2024-01-02T15:45:00.001",
|
||||
"invalid-date",
|
||||
]
|
||||
newest_date = find_newest_datetime_in_list(date_list)
|
||||
print(f"Newest date in list: {newest_date}")
|
||||
|
||||
|
||||
def __parse_day_of_week_range():
|
||||
ranges = [
|
||||
"Mon-Fri",
|
||||
"Saturday-Sunday",
|
||||
"Wed-Mon",
|
||||
"Fri-Fri",
|
||||
"mon-tue",
|
||||
"Invalid-Range"
|
||||
]
|
||||
for range_str in ranges:
|
||||
try:
|
||||
days = parse_day_of_week_range(range_str)
|
||||
print(f"Day range '{range_str}' -> {days}")
|
||||
except ValueError as e:
|
||||
print(f"[!] Error parsing day range '{range_str}': {e}")
|
||||
|
||||
|
||||
def __parse_time_range():
|
||||
ranges = [
|
||||
"08:00-17:00",
|
||||
"22:00-06:00",
|
||||
"12:30-12:30",
|
||||
"invalid-range"
|
||||
]
|
||||
for range_str in ranges:
|
||||
try:
|
||||
start_time, end_time = parse_time_range(range_str)
|
||||
print(f"Time range '{range_str}' -> Start: {start_time}, End: {end_time}")
|
||||
except ValueError as e:
|
||||
print(f"[!] Error parsing time range '{range_str}': {e}")
|
||||
|
||||
|
||||
def __times_overlap_or_connect():
|
||||
time_format = "%H:%M"
|
||||
time_ranges = [
|
||||
(("08:00", "12:00"), ("11:00", "15:00")), # Overlap
|
||||
(("22:00", "02:00"), ("01:00", "05:00")), # Overlap across midnight
|
||||
(("10:00", "12:00"), ("12:00", "14:00")), # Connect
|
||||
(("09:00", "11:00"), ("12:00", "14:00")), # No overlap
|
||||
]
|
||||
for (start1, end1), (start2, end2) in time_ranges:
|
||||
start1 = datetime.strptime(start1, time_format).time()
|
||||
end1 = datetime.strptime(end1, time_format).time()
|
||||
start2 = datetime.strptime(start2, time_format).time()
|
||||
end2 = datetime.strptime(end2, time_format).time()
|
||||
overlap = times_overlap_or_connect((start1, end1), (start2, end2))
|
||||
overlap_connect = times_overlap_or_connect((start1, end1), (start2, end2), True)
|
||||
print(f"Time ranges {start1}-{end1} and {start2}-{end2} overlap/connect: {overlap}/{overlap_connect}")
|
||||
|
||||
|
||||
def __is_time_in_range():
|
||||
time_format = "%H:%M:%S"
|
||||
test_cases = [
|
||||
("10:00:00", "09:00:00", "11:00:00"),
|
||||
("23:30:00", "22:00:00", "01:00:00"), # Across midnight
|
||||
("05:00:00", "06:00:00", "10:00:00"), # Not in range
|
||||
("12:00:00", "12:00:00", "12:00:00"), # Exact match
|
||||
]
|
||||
for (check_time, start_time, end_time) in test_cases:
|
||||
start_time = datetime.strptime(start_time, time_format).time()
|
||||
end_time = datetime.strptime(end_time, time_format).time()
|
||||
in_range = is_time_in_range(
|
||||
f"{check_time}", start_time.strftime("%H:%M:%S"), end_time.strftime("%H:%M:%S")
|
||||
)
|
||||
print(f"Time {check_time} in range {start_time}-{end_time}: {in_range}")
|
||||
|
||||
|
||||
def __reorder_weekdays_from_today():
|
||||
for base_day in [
|
||||
"Tue", "Wed", "Sunday", "Fri", "InvalidDay"
|
||||
]:
|
||||
try:
|
||||
reordered_days = reorder_weekdays_from_today(base_day)
|
||||
print(f"Reordered weekdays from {base_day}: {reordered_days}")
|
||||
except ValueError as e:
|
||||
print(f"[!] Error reordering weekdays from '{base_day}': {e}")
|
||||
|
||||
|
||||
def main() -> None:
|
||||
"""
|
||||
Comment
|
||||
"""
|
||||
print("\nDatetime ISO 8601 tests:\n")
|
||||
__get_datetime_iso8601()
|
||||
print("\nSystem time test:")
|
||||
print(f"System time: {get_system_timezone()}")
|
||||
print("\nParse timezone data tests:\n")
|
||||
__parse_timezone_data()
|
||||
print("\nValidate date tests:\n")
|
||||
__validate_date()
|
||||
print("\nParse flexible date tests:\n")
|
||||
__parse_flexible_date()
|
||||
print("\nCompare dates tests:\n")
|
||||
__compare_dates()
|
||||
print("\nFind newest datetime in list tests:\n")
|
||||
__find_newest_datetime_in_list()
|
||||
print("\nParse day of week range tests:\n")
|
||||
__parse_day_of_week_range()
|
||||
print("\nParse time range tests:\n")
|
||||
__parse_time_range()
|
||||
print("\nTimes overlap or connect tests:\n")
|
||||
__times_overlap_or_connect()
|
||||
print("\nIs time in range tests:\n")
|
||||
__is_time_in_range()
|
||||
print("\nReorder weekdays from today tests:\n")
|
||||
__reorder_weekdays_from_today()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
# __END__
|
||||
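The midnight-wrapping cases above ("22:00-01:00" containing "23:30") rely on the usual wrap-around trick: when start > end the range is treated as two pieces around midnight. A minimal standalone sketch of that check, mirroring what the tests expect rather than the corelibs source:

from datetime import time

# Sketch: membership test for a time range that may wrap past midnight.
def in_range(check: time, start: time, end: time) -> bool:
    if start <= end:                        # normal range, e.g. 09:00-11:00
        return start <= check <= end
    return check >= start or check <= end   # wrapped, e.g. 22:00-01:00

print(in_range(time(23, 30), time(22, 0), time(1, 0)))  # True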
92  test-run/datetime_handling/timestamp_convert.py  Normal file
@@ -0,0 +1,92 @@
#!/usr/bin/env python3

"""
timestamp string checks
"""

from corelibs.datetime_handling.timestamp_convert import (
    convert_timestamp, seconds_to_string, convert_to_seconds, TimeParseError, TimeUnitError
)


def main() -> None:
    """
    Comment
    """
    print("\n--- Testing convert_to_seconds ---\n")
    test_cases = [
        "5M 6d",  # 5 months, 6 days
        "2h 30m 45s",  # 2 hours, 30 minutes, 45 seconds
        "1Y 2M 3d",  # 1 year, 2 months, 3 days
        "1h",  # 1 hour
        "30m",  # 30 minutes
        "2 hours 15 minutes",  # 2 hours, 15 minutes
        "1d 12h",  # 1 day, 12 hours
        "3M 2d 4h",  # 3 months, 2 days, 4 hours
        "45s",  # 45 seconds
        "-45s",  # -45 seconds
        "-1h",  # -1 hour
        "-30m",  # -30 minutes
        "-2h 30m 45s",  # -2 hours, 30 minutes, 45 seconds
        "-1d 12h",  # -1 day, 12 hours
        "-3M 2d 4h",  # -3 months, 2 days, 4 hours
        "-1Y 2M 3d",  # -1 year, 2 months, 3 days
        "-2 hours 15 minutes",  # -2 hours, 15 minutes
        "-1 year 2 months",  # -1 year, 2 months
        "-2Y 6M 15d 8h 30m 45s",  # Complex negative example
        "1 year 2 months",  # 1 year, 2 months
        "2Y 6M 15d 8h 30m 45s",  # Complex example
        # invalid tests
        "5M 6d 2M",  # months appears twice
        "2h 30m 45s 1h",  # hours appears twice
        "1d 2 days",  # days appears twice (short and long form)
        "30m 45 minutes",  # minutes appears twice
        "1Y 2 years",  # years appears twice
        "1x 2 yrs",  # invalid names

        123,  # int
        789.12,  # float
        456.56,  # float, high
        "4566",  # int as string
        "5551.12",  # float as string
        "5551.56",  # float, high as string
    ]

    for time_string in test_cases:
        try:
            result = convert_to_seconds(time_string)
            print(f"Human readable to seconds: {time_string} => {result}")
        except (TimeParseError, TimeUnitError) as e:
            print(f"Error encountered for {time_string}: {type(e).__name__}: {e}")

    print("\n--- Testing seconds_to_string and convert_timestamp ---\n")

    test_values = [
        'as is string',
        -172800.001234,  # -2 days, -0.001234 seconds
        -90061.789,  # -1 day, -1 hour, -1 minute, -1.789 seconds
        -3661.456,  # -1 hour, -1 minute, -1.456 seconds
        -65.123,  # -1 minute, -5.123 seconds
        -1.5,  # -1.5 seconds
        -0.001,  # -1 millisecond
        -0.000001,  # -1 microsecond
        0,  # 0 seconds
        0.000001,  # 1 microsecond
        0.001,  # 1 millisecond
        1.5,  # 1.5 seconds
        65.123,  # 1 minute, 5.123 seconds
        3661.456,  # 1 hour, 1 minute, 1.456 seconds
        90061.789,  # 1 day, 1 hour, 1 minute, 1.789 seconds
        172800.001234  # 2 days, 0.001234 seconds
    ]

    for time_value in test_values:
        result = seconds_to_string(time_value, show_microseconds=True)
        result_alt = convert_timestamp(time_value, show_microseconds=True)
        print(f"Seconds to human readable: {time_value} => {result} / {result_alt}")


if __name__ == "__main__":
    main()

# __END__
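The invalid cases above show that convert_to_seconds rejects a unit that appears twice, in short or long form. A rough sketch of that parsing idea under those assumed semantics (only d/h/m/s handled; months, years, long names, and the leading-sign rule of the real function are omitted):

import re

# Sketch of "2h 30m 45s" -> seconds with duplicate-unit detection.
# Illustrative only; not the corelibs implementation.
UNITS = {"d": 86400, "h": 3600, "m": 60, "s": 1}

def to_seconds(text: str) -> int:
    total, seen = 0, set()
    for amount, unit in re.findall(r"(-?\d+)\s*([dhms])\b", text):
        if unit in seen:
            raise ValueError(f"unit '{unit}' appears twice")
        seen.add(unit)
        total += int(amount) * UNITS[unit]
    return total

print(to_seconds("2h 30m 45s"))  # 9045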
2  test-run/db_handling/database/.gitignore  vendored  Normal file
@@ -0,0 +1,2 @@
*
!.gitignore

2  test-run/db_handling/log/.gitignore  vendored  Normal file
@@ -0,0 +1,2 @@
*
!.gitignore
148  test-run/db_handling/sqlite_io.py  Normal file
@@ -0,0 +1,148 @@
#!/usr/bin/env python3

"""
Main comment
"""

from pathlib import Path
from uuid import uuid4
import json
import sqlite3
from corelibs.debug_handling.dump_data import dump_data
from corelibs.logging_handling.log import Log, Logger
from corelibs.db_handling.sqlite_io import SQLiteIO

SCRIPT_PATH: Path = Path(__file__).resolve().parent
ROOT_PATH: Path = SCRIPT_PATH
DATABASE_DIR: Path = Path("database")
LOG_DIR: Path = Path("log")


def main() -> None:
    """
    Comment
    """
    log = Log(
        log_path=ROOT_PATH.joinpath(LOG_DIR, 'sqlite_io.log'),
        log_name="SQLite IO",
        log_settings={
            "log_level_console": 'DEBUG',
            "log_level_file": 'DEBUG',
        }
    )
    db = SQLiteIO(
        log=Logger(log.get_logger_settings()),
        db_name=ROOT_PATH.joinpath(DATABASE_DIR, 'test_sqlite_io.db'),
        row_factory='Dict'
    )
    if db.db_connected():
        log.info(f"Connected to DB: {db.db_name}")
    if db.trigger_exists('trg_test_a_set_date_updated_on_update'):
        log.info("Trigger trg_test_a_set_date_updated_on_update exists")
    if db.table_exists('test_a'):
        log.info("Table test_a exists, dropping for clean test")
        db.execute_query("DROP TABLE test_a;")
    # create a dummy table
    table_sql = """
        CREATE TABLE IF NOT EXISTS test_a (
            test_a_id INTEGER PRIMARY KEY,
            date_created TEXT DEFAULT (strftime('%Y-%m-%d %H:%M:%f', 'now')),
            date_updated TEXT,
            uid TEXT NOT NULL UNIQUE,
            set_current_timestamp TEXT DEFAULT CURRENT_TIMESTAMP,
            text_a TEXT,
            content, -- intentionally typeless: SQLite allows columns without a declared type
            int_a INTEGER,
            float_a REAL
        );
    """
    result = db.execute_query(table_sql)
    log.debug(f"Create table result: {result}")
    trigger_sql = """
        CREATE TRIGGER trg_test_a_set_date_updated_on_update
        AFTER UPDATE ON test_a
        FOR EACH ROW
        WHEN OLD.date_updated IS NULL OR NEW.date_updated = OLD.date_updated
        BEGIN
            UPDATE test_a
            SET date_updated = (strftime('%Y-%m-%d %H:%M:%f', 'now'))
            WHERE test_a_id = NEW.test_a_id;
        END;
    """
    result = db.execute_query(trigger_sql)
    log.debug(f"Create trigger result: {result}")
    result = db.meta_data_detail('test_a')
    log.debug(f"Table meta data detail: {dump_data(result)}")
    # INSERT DATA
    sql = """
        INSERT INTO test_a (uid, text_a, content, int_a, float_a)
        VALUES (?, ?, ?, ?, ?)
        RETURNING test_a_id, uid;
    """
    result = db.execute_query(
        sql,
        (
            str(uuid4()),
            'Some text A',
            json.dumps({'foo': 'bar', 'number': 42}),
            123,
            123.456,
        )
    )
    log.debug(f"[1] Insert data result: {dump_data(result)}")
    __uid: str = ''
    if result is not False:
        # first one only of interest
        result = dict(result[0])
        __uid = str(result.get('uid', ''))
    # second insert
    result = db.execute_query(
        sql,
        (
            str(uuid4()),
            'Some text A',
            json.dumps({'foo': 'bar', 'number': 42}),
            123,
            123.456,
        )
    )
    log.debug(f"[2] Insert data result: {dump_data(result)}")
    result = db.execute_query("SELECT * FROM test_a;")
    log.debug(f"Select data result: {dump_data(result)}")
    result = db.return_one("SELECT * FROM test_a WHERE uid = ?;", (__uid,))
    log.debug(f"Fetch row result: {dump_data(result)}")
    sql = """
        UPDATE test_a
        SET text_a = ?
        WHERE uid = ?;
    """
    result = db.execute_query(
        sql,
        (
            'Some updated text A',
            __uid,
        )
    )
    log.debug(f"Update data result: {dump_data(result)}")
    result = db.return_one("SELECT * FROM test_a WHERE uid = ?;", (__uid,))
    log.debug(f"Fetch row after update result: {dump_data(result)}")

    db.db_close()

    db = SQLiteIO(
        log=Logger(log.get_logger_settings()),
        db_name=ROOT_PATH.joinpath(DATABASE_DIR, 'test_sqlite_io.db'),
        row_factory='Row'
    )
    result = db.return_one("SELECT * FROM test_a WHERE uid = ?;", (__uid,))
    if result is not None and result is not False:
        log.debug(f"Fetch row result: {dump_data(result)} -> {dict(result)} -> {result.keys()}")
        log.debug(f"Access via index: {result[5]} -> {result['text_a']}")
        if isinstance(result, sqlite3.Row):
            log.debug('Result is sqlite3.Row as expected')


if __name__ == "__main__":
    main()

# __END__
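The INSERT statements above use SQLite's RETURNING clause, which is only available from SQLite 3.35.0 onward; the version bundled with a given Python build varies. A standalone stdlib check, independent of the SQLiteIO wrapper:

import sqlite3

# RETURNING requires SQLite >= 3.35.0; check the linked library version first.
print(sqlite3.sqlite_version)

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE t (id INTEGER PRIMARY KEY, uid TEXT)")
row = con.execute(
    "INSERT INTO t (uid) VALUES (?) RETURNING id, uid", ("abc",)
).fetchone()
print(row)  # (1, 'abc')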
34  test-run/encryption/symmetric_encryption.py  Normal file
@@ -0,0 +1,34 @@
#!/usr/bin/env python3

"""
Symmetric encryption test
"""

import json
from corelibs.debug_handling.dump_data import dump_data
from corelibs.encryption_handling.symmetric_encryption import SymmetricEncryption


def main() -> None:
    """
    Comment
    """
    password = "strongpassword"
    se = SymmetricEncryption(password)

    plaintext = "Hello, World!"
    ciphertext = se.encrypt_with_metadata_return_str(plaintext)
    decrypted = se.decrypt_with_metadata(ciphertext)
    print(f"Encrypted: {dump_data(json.loads(ciphertext))}")
    print(f"Input: {plaintext} -> {decrypted}")

    static_ciphertext = SymmetricEncryption.encrypt_data(plaintext, password)
    decrypted = SymmetricEncryption.decrypt_data(static_ciphertext, password)
    print(f"Static Encrypted: {dump_data(json.loads(static_ciphertext))}")
    print(f"Input: {plaintext} -> {decrypted}")


if __name__ == "__main__":
    main()

# __END__
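SymmetricEncryption's internals are not part of this diff. For comparison, a password-based scheme with the same shape built on the cryptography package (Fernet plus PBKDF2) looks like the sketch below; the salt size and iteration count are illustrative choices, not values taken from corelibs.

import base64
import os
from cryptography.fernet import Fernet
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC

# Derive a Fernet key from a password (illustrative parameters).
salt = os.urandom(16)
kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt, iterations=600_000)
key = base64.urlsafe_b64encode(kdf.derive(b"strongpassword"))

f = Fernet(key)
token = f.encrypt(b"Hello, World!")
print(f.decrypt(token))  # b'Hello, World!'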
31  test-run/file_handling/file_bom_check.py  Normal file
@@ -0,0 +1,31 @@
#!/usr/bin/env python3

"""
BOM check for files
"""

from pathlib import Path
from corelibs.file_handling.file_bom_encoding import is_bom_encoded, is_bom_encoded_info
from corelibs.debug_handling.dump_data import dump_data


def main() -> None:
    """
    Check files for BOM encoding
    """
    base_path = Path(__file__).resolve().parent
    for file_path in [
        'test-data/sample_with_bom.csv',
        'test-data/sample_without_bom.csv',
    ]:
        has_bom = is_bom_encoded(base_path.joinpath(file_path))
        bom_info = is_bom_encoded_info(base_path.joinpath(file_path))
        print(f'File: {file_path}')
        print(f'  Has BOM: {has_bom}')
        print(f'  BOM Info: {dump_data(bom_info)}')


if __name__ == "__main__":
    main()

# __END__
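BOM detection itself is just a prefix check against the known byte-order marks; a minimal stdlib version is sketched below (the corelibs helper presumably returns richer info). Note the ordering: the UTF-32-LE BOM starts with the UTF-16-LE bytes, so longer prefixes must be tested first.

import codecs
from pathlib import Path

# Known BOM byte prefixes, longest first so UTF-32 wins over UTF-16.
BOMS = [
    (codecs.BOM_UTF32_LE, "utf-32-le"),
    (codecs.BOM_UTF32_BE, "utf-32-be"),
    (codecs.BOM_UTF8, "utf-8-sig"),
    (codecs.BOM_UTF16_LE, "utf-16-le"),
    (codecs.BOM_UTF16_BE, "utf-16-be"),
]

def detect_bom(path: Path) -> str | None:
    head = path.read_bytes()[:4]
    for bom, name in BOMS:
        if head.startswith(bom):
            return name
    return None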
6  test-run/file_handling/test-data/sample_with_bom.csv  Normal file
@@ -0,0 +1,6 @@
Name,Age,City,Country
John Doe,25,New York,USA
Jane Smith,30,London,UK
山田太郎,28,東京,Japan
María García,35,Madrid,Spain
François Dupont,42,Paris,France

6  test-run/file_handling/test-data/sample_without_bom.csv  Normal file
@@ -0,0 +1,6 @@
Name,Age,City,Country
John Doe,25,New York,USA
Jane Smith,30,London,UK
山田太郎,28,東京,Japan
María García,35,Madrid,Spain
François Dupont,42,Paris,France
52  test-run/iterator_handling/data_search.py  Normal file
@@ -0,0 +1,52 @@
#!/usr/bin/env python3

"""
Search data tests
iterator_handling.data_search
"""

from corelibs.debug_handling.dump_data import dump_data
from corelibs.iterator_handling.data_search import find_in_array_from_list, ArraySearchList


def main() -> None:
    """
    Comment
    """
    data = [
        {
            "lookup_value_p": "A01",
            "lookup_value_c": "B01",
            "replace_value": "R01",
        },
        {
            "lookup_value_p": "A02",
            "lookup_value_c": "B02",
            "replace_value": "R02",
        },
    ]
    test_foo = ArraySearchList(
        key="lookup_value_p",
        value="A01"
    )
    print(test_foo)
    search: list[ArraySearchList] = [
        {
            "key": "lookup_value_p",
            "value": "A01"
        },
        {
            "key": "lookup_value_c",
            "value": "B01"
        }
    ]

    result = find_in_array_from_list(data, search)

    print(f"Search {dump_data(search)} -> {dump_data(result)}")


if __name__ == "__main__":
    main()

# __END__
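Judging by the test data, find_in_array_from_list returns the entries where every key/value pair in the search list matches. Re-implemented under that assumption it is a one-liner; this is a sketch of the apparent semantics, not the corelibs source:

# Assumed semantics: an entry matches when all search pairs are present.
def find_all(data, search):
    return [
        entry for entry in data
        if all(entry.get(s["key"]) == s["value"] for s in search)
    ]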
120  test-run/iterator_handling/dict_helpers.py  Normal file
@@ -0,0 +1,120 @@
"""
Iterator helper testing
"""

from typing import Any
from corelibs.debug_handling.dump_data import dump_data
from corelibs.iterator_handling.dict_mask import mask
from corelibs.iterator_handling.dict_helpers import set_entry


def __mask():
    data = {
        # "user": "john",
        # "encryption_key": "Secret key",
        # "ENCRYPTION.TEST": "Secret key test",
        # "inside_password_test": "Hide this",
        "password": ["secret1", "secret2"],  # List value gets masked
        # "config": {
        #     "db_password": {"primary": "secret", "backup": "secret2"},  # Dict value gets masked
        #     "api_keys": ["key1", "key2", "key3"]  # List value gets masked
        # },
        # "items": [  # List value that doesn't get masked, but gets processed recursively
        #     {"name": "item1", "secret_key": "itemsecret"},
        #     {"name": "item2", "passwords": ["pass1", "pass2"]}
        # ],
        # "normal_list": ["item1", "item2", "item3"]  # Normal list, not masked
    }
    data = {
        "config": {
            # "password": ["secret1", "secret2"],
            # "password_other": {"password": ["secret1", "secret2"]},
            # "database": {
            #     "host": "localhost",
            #     "password": "db_secret",
            #     "users": [
            #         {"name": "admin", "password": "admin123"},
            #         {"name": "user", "secret_key": "user456"}
            #     ]
            # },
            # "api": {
            #     # "endpoints": ["api1", "api2"],
            #     "encryption_settings": {
            #         "enabled": True,
            #         "secret": "api_secret"
            #     }
            # }
            "secret_key": "normal_value",
            "api_key": "normal_value",
            "my_key_value": "normal_value",
        }
    }
    data = {
        "basic": {
            "log_level_console": "DEBUG",
            "log_level_file": "DEBUG",
            "storage_interface": "sqlite",
            "content_start_date": "2023-1-1",
            "encryption_key": "ENCRYPTION_KEY"
        },
        "email": {
            "alert_email": [
                "test+z-sd@tequila.jp"
            ]
        },
        "poller": {
            "max_forks": "1",
            "interface": "Zac"
        },
        "pusher": {
            "max_forks": "3",
            "interface": "Screendragon"
        },
        "api:Zac": {
            "type": "zac",
            "client_id": "oro_zac_demo",
            "client_secret": "CLIENT_SECRET",
            "username": "zacuser",
            "password": "ZACuser3",
            "hostname": "e-gra2.zac.ai",
            "appname": "e-gra2_api_trial",
            "api_path": "b/api/v2"
        },
        "api:Screendragon": {
            "type": "screendragon",
            "client_id": "omniprostaging",
            "encryption_client": "SOME_SECRET",
            "client_encryption": "SOME_SECRET",
            "secret_client": "SOME_SECRET",
            "client_secret": "SOME_SECRET",
            "hostname": "omniprostaging.screendragon.com",
            "appname": "sdapi",
            "api_path": "api"
        }
    }
    result = mask(data)
    print(f"** In: {dump_data(data)}")
    print(f"===> Masked: {dump_data(result)}")


def __set_dict_value_entry():

    dict_empty: dict[str, Any] = {}
    new = set_entry(dict_empty, 'a.b.c', 1)
    print(f"[1] Set dict entry: {dump_data(new)}")
    new = set_entry(new, 'dict', {'key': 'value'})
    print(f"[2] Set dict entry: {dump_data(new)}")
    new = set_entry(new, 'list', [1, 2, 3])
    print(f"[3] Set dict entry: {dump_data(new)}")


def main():
    """
    Test: corelibs.iterator_handling.dict_helpers
    """
    __mask()
    __set_dict_value_entry()


if __name__ == "__main__":
    main()
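set_entry takes a dotted path and evidently creates intermediate dicts on the way down. A compact sketch of that pattern under the same assumption (set_path here is an illustrative name, not the corelibs function):

from typing import Any

# Sketch of dotted-path assignment: "a.b.c" -> {"a": {"b": {"c": value}}},
# creating intermediate dicts on demand.
def set_path(data: dict[str, Any], path: str, value: Any) -> dict[str, Any]:
    node = data
    *parents, leaf = path.split(".")
    for key in parents:
        node = node.setdefault(key, {})
    node[leaf] = value
    return data

print(set_path({}, "a.b.c", 1))  # {'a': {'b': {'c': 1}}}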
29  test-run/iterator_handling/list_helpers.py  Normal file
@@ -0,0 +1,29 @@
"""
test list helpers
"""

from corelibs.iterator_handling.list_helpers import is_list_in_list, convert_to_list


def __test_is_list_in_list_a():
    list_a = [1, "hello", 3.14, True, "world"]
    list_b = ["hello", True, 42]
    result = is_list_in_list(list_a, list_b)
    print(f"RESULT: {result}")


def __convert_list():
    source = "hello"
    result = convert_to_list(source)
    print(f"IN: {source} -> {result}")


def main():
    __test_is_list_in_list_a()
    __convert_list()


if __name__ == "__main__":
    main()

# __END__
54  test-run/json_handling/jmespath_helper.py  Normal file
@@ -0,0 +1,54 @@
#!/usr/bin/env python3

"""
JMESPath testing
"""

from corelibs.debug_handling.dump_data import dump_data
from corelibs.json_handling.jmespath_helper import jmespath_search


def main() -> None:
    """
    Comment
    """
    __set = {
        'a': 'b',
        'foobar': [1, 2, 'a'],
        'bar': {
            'a': 1,
            'b': 'c'
        },
        'baz': [
            {
                'aa': 1,
                'ab': 'cc'
            },
            {
                'ba': 2,
                'bb': 'dd'
            },
        ],
        'foo': {
            'a': [1, 2, 3],
            'b': ['a', 'b', 'c']
        }
    }

    __get = [
        'a',
        'bar.a',
        'foo.a',
        'baz[].aa',
        "[?\"c\" && contains(\"c\", 'b')]",
        "[?contains(\"c\", 'b')]",
    ]
    for __jmespath in __get:
        result = jmespath_search(__set, __jmespath)
        print(f"GET {__jmespath}: {dump_data(result)}")


if __name__ == "__main__":
    main()

# __END__
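jmespath_search presumably wraps the jmespath package (the wrapper's source is not in this diff). For reference, the equivalent direct calls for two of the expressions above:

import jmespath

data = {"bar": {"a": 1}, "baz": [{"aa": 1}, {"ba": 2}]}
print(jmespath.search("bar.a", data))     # 1
print(jmespath.search("baz[].aa", data))  # [1]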
52  test-run/json_handling/json_replace.py  Normal file
@@ -0,0 +1,52 @@
#!/usr/bin/env python3

"""
JSON content replace tests
"""

from deepdiff import DeepDiff
from corelibs.debug_handling.dump_data import dump_data
from corelibs.json_handling.json_helper import modify_with_jsonpath


def main() -> None:
    """
    Comment
    """
    __data = {
        'a': 'b',
        'foobar': [1, 2, 'a'],
        'bar': {
            'a': 1,
            'b': 'c'
        },
        'baz': [
            {
                'aa': 1,
                'ab': 'cc'
            },
            {
                'ba': 2,
                'bb': 'dd'
            },
        ],
        'foo': {
            'a': [1, 2, 3],
            'b': ['a', 'b', 'c']
        }
    }

    # Modify some values using JSONPath
    __replace_data = modify_with_jsonpath(__data, 'bar.a', 42)
    __replace_data = modify_with_jsonpath(__replace_data, 'foo.b[1]', 'modified')
    __replace_data = modify_with_jsonpath(__replace_data, 'baz[0].ab', 'changed')

    print(f"Original Data:\n{dump_data(__data)}\n")
    print(f"Modified Data:\n{dump_data(__replace_data)}\n")
    print(f"Differences:\n{dump_data(DeepDiff(__data, __replace_data, verbose_level=2))}\n")


if __name__ == "__main__":
    main()

# __END__
115  test-run/logging_handling/log.py  Normal file
@@ -0,0 +1,115 @@
"""
Log logging_handling.log testing
"""

# import atexit
import sys
from pathlib import Path
# this is for testing only
from corelibs.logging_handling.log import Log, Logger, ConsoleFormat, ConsoleFormatSettings
from corelibs.debug_handling.debug_helpers import exception_stack, call_stack
from corelibs.logging_handling.logging_level_handling.logging_level import LoggingLevel


def main():
    """
    Log testing
    """
    script_path: Path = Path(__file__).resolve().parent
    log = Log(
        log_path=script_path.joinpath('log', 'test.log'),
        log_name="Test Log",
        log_settings={
            "log_level_console": 'DEBUG',
            # "log_level_console": None,
            "log_level_file": 'DEBUG',
            # "console_color_output_enabled": False,
            "per_run_log": True,
            # Set the console log type: a ConsoleFormat value or a bitwise combination of ConsoleFormatType
            # "console_format_type": ConsoleFormatSettings.BARE,
            # "console_format_type": ConsoleFormatSettings.MINIMAL,
            # "console_format_type": ConsoleFormatType.TIME_MICROSECONDS | ConsoleFormatType.NAME,
            # "console_format_type": ConsoleFormatType.NAME,
            "console_format_type": ConsoleFormat.TIME | ConsoleFormat.TIMEZONE | ConsoleFormat.LINENO,
        }
    )
    logn = Logger(log.get_logger_settings())

    log.info("ConsoleFormatType FILE is: %s", ConsoleFormat.FILE)
    log.info("ConsoleFormatSettings ALL is: %s", ConsoleFormatSettings.ALL)

    log.logger.debug('[NORMAL] Debug test: %s', log.logger.name)
    log.lg.debug('[NORMAL] Debug test: %s', log.logger.name)
    log.debug('[NORMAL-] Debug test: %s', log.logger.name)
    logn.lg.debug('[NORMAL N] Debug test: %s', log.logger.name)
    logn.debug('[NORMAL N-] Debug test: %s', log.logger.name)
    log.logger.info('[NORMAL] Info test: %s', log.logger.name)
    log.info('[NORMAL-] Info test: %s', log.logger.name)
    log.logger.warning('[NORMAL] Warning test: %s', log.logger.name)
    log.warning('[NORMAL-] Warning test: %s', log.logger.name)
    log.logger.error('[NORMAL] Error test: %s', log.logger.name)
    log.error('[NORMAL-] Error test: %s', log.logger.name)
    log.logger.critical('[NORMAL] Critical test: %s', log.logger.name)
    log.critical('[NORMAL-] Critical test: %s', log.logger.name)
    log.logger.log(LoggingLevel.ALERT.value, '[NORMAL] alert test: %s', log.logger.name)
    log.alert('[NORMAL-] alert test: %s', log.logger.name)
    log.emergency('[NORMAL-] emergency test: %s', log.logger.name)
    log.logger.log(LoggingLevel.EMERGENCY.value, '[NORMAL] emergency test: %s', log.logger.name)
    log.exception('[NORMAL] Exception test: %s', log.logger.name)
    log.logger.log(LoggingLevel.EXCEPTION.value, '[NORMAL] exception test: %s', log.logger.name, exc_info=True)

    bad_level = 'WRONG'
    if not Log.validate_log_level(bad_level):
        print(f"Invalid level: {bad_level}")
    good_level = 'WARNING'
    if Log.validate_log_level(good_level):
        print(f"Valid level: {good_level}")

    print(f"ERROR is to_logging_level(): {LoggingLevel.ERROR.to_logging_level()}")
    print(f"ERROR is to_lower_case(): {LoggingLevel.ERROR.to_lower_case()}")
    print(f"ERROR is: {LoggingLevel.ERROR}")
    print(f"ERROR is value: {LoggingLevel.ERROR.value}")
    print(f"ERROR is name: {LoggingLevel.ERROR.name}")
    print(f"ERROR is from_string(lower): {LoggingLevel.from_string('error')}")
    print(f"ERROR is from_string(upper): {LoggingLevel.from_string('ERROR')}")
    print(f"ERROR is from_int: {LoggingLevel.from_int(40)}")
    print(f"ERROR is from_any(text lower): {LoggingLevel.from_any('error')}")
    print(f"ERROR is from_any(text upper): {LoggingLevel.from_any('ERROR')}")
    print(f"ERROR is from_any(int): {LoggingLevel.from_any(40)}")
    print(f"INFO <= ERROR: {LoggingLevel.INFO.includes(LoggingLevel.ERROR)}")
    print(f"INFO > ERROR: {LoggingLevel.INFO.is_higher_than(LoggingLevel.ERROR)}")
    print(f"INFO < ERROR: {LoggingLevel.INFO.is_lower_than(LoggingLevel.ERROR)}")

    try:
        print(f"INVALID is A: {LoggingLevel.from_string('INVALID')}")
    except ValueError as e:
        print(f"* ERROR: {e}")

    try:
        __test = 5 / 0
        print(f"Divided: {__test}")
    except ZeroDivisionError as e:
        print(f"** sys.exc_info(): {sys.exc_info()}")
        print(f"** sys.exc_info(): [{exception_stack()}] | [{exception_stack(sys.exc_info())}] | [{call_stack()}]")
        log.logger.critical("Division by zero: %s", e)
        log.exception("Division by zero: %s", e)

    for handler in log.logger.handlers:
        print(
            f"** Handler (logger) {handler} [{handler.name}] -> "
            f"{handler.level} -> {LoggingLevel.from_any(handler.level)}"
        )

    for key, handler in log.handlers.items():
        print(f"Handler (handlers) [{key}] {handler} -> {handler.level} -> {LoggingLevel.from_any(handler.level)}")
    log.set_log_level('stream_handler', LoggingLevel.ERROR)
    log.logger.warning('[NORMAL] Invisible Warning test: %s', log.logger.name)
    log.logger.error('[NORMAL] Visible Error test: %s', log.logger.name)
    # log.handlers['stream_handler'].se


if __name__ == "__main__":
    main()

# __END__
2  test-run/logging_handling/log/.gitignore  vendored  Normal file
@@ -0,0 +1,2 @@
*
!.gitignore
91  test-run/logging_handling/log_pool.py  Normal file
@@ -0,0 +1,91 @@
"""
Pool Queue log handling
Thread Queue log handling
"""

import random
import time
from multiprocessing import Queue
import concurrent.futures
import logging
from pathlib import Path
from corelibs.logging_handling.log import Log
from corelibs.logging_handling.logging_level_handling.logging_level import LoggingLevel


def work_function(log_name: str, worker_id: int, data: list[int]) -> int:
    """
    simulate a worker

    Arguments:
        log_name {str} -- base log name for the per-worker logger
        worker_id {int} -- worker number, used to scale the result
        data {list[int]} -- numbers to sum up

    Returns:
        int -- sum(data) multiplied by the worker id
    """
    log = logging.getLogger(f'{log_name}-WorkerFn-{worker_id}')
    log.info('Starting worker: %s', worker_id)
    time.sleep(random.uniform(1, 3))
    result = sum(data) * worker_id
    return result


def main():
    """
    Queue log tester
    """
    print("[START] Queue logger test")
    log_queue: 'Queue[str]' = Queue()
    script_path: Path = Path(__file__).resolve().parent
    log = Log(
        log_path=script_path.joinpath('log', 'test.log'),
        log_name="Test Log",
        log_settings={
            "log_level_console": 'INFO',
            "log_level_file": 'INFO',
            "log_queue": log_queue,
        }
    )

    log.logger.debug('Pool Fork logging test')
    max_forks = 2
    data_sets = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    with concurrent.futures.ProcessPoolExecutor(
        max_workers=max_forks,
        initializer=Log.init_worker_logging,
        initargs=(log_queue,)
    ) as executor:
        log.logger.info('Start workers')
        futures = [
            executor.submit(work_function, log.log_name, worker_id, data)
            for worker_id, data in enumerate(data_sets, 1)
        ]
        log.logger.info('Workers started')

        for future in concurrent.futures.as_completed(futures):
            log.logger.warning('Processing result: %s', future.result())
            print(f"Processing result: {future.result()}")

        log.set_log_level('stream_handler', LoggingLevel.ERROR)
        log.logger.error('SECOND Start workers')
        futures = [
            executor.submit(work_function, log.log_name, worker_id, data)
            for worker_id, data in enumerate(data_sets, 1)
        ]
        log.logger.info('[INVISIBLE] Workers started')
        log.logger.error('[VISIBLE] Second Workers started')

        for future in concurrent.futures.as_completed(futures):
            log.logger.error('Processing result: %s', future.result())
            print(f"Processing result: {future.result()}")

    log.set_log_level('stream_handler', LoggingLevel.DEBUG)
    log.logger.info('[END] Queue logger test')
    log.stop_listener()


if __name__ == "__main__":
    main()

# __END__
66  test-run/logging_handling/log_queue.py  Normal file
@@ -0,0 +1,66 @@
"""
Log logging_handling.log testing
"""

# import atexit
from pathlib import Path
from multiprocessing import Queue
import time
# this is for testing only
from corelibs.logging_handling.log import Log
from corelibs.logging_handling.logging_level_handling.logging_level import LoggingLevel


def main():
    """
    Log testing
    """
    script_path: Path = Path(__file__).resolve().parent

    log_queue: 'Queue[str]' = Queue()
    log_q = Log(
        log_path=script_path.joinpath('log', 'test_queue.log'),
        log_name="Test Log",
        log_settings={
            "log_level_console": 'WARNING',
            "log_level_file": 'ERROR',
            "log_queue": log_queue
            # "console_color_output_enabled": False,
        }
    )

    log_q.logger.debug('[QUEUE] Debug test: %s', log_q.logger.name)
    log_q.logger.info('[QUEUE] Info test: %s', log_q.logger.name)
    log_q.logger.warning('[QUEUE] Warning test: %s', log_q.logger.name)
    log_q.logger.error('[QUEUE] Error test: %s', log_q.logger.name)
    log_q.logger.critical('[QUEUE] Critical test: %s', log_q.logger.name)
    log_q.logger.log(LoggingLevel.EXCEPTION.value, '[QUEUE] Exception test: %s', log_q.logger.name, exc_info=True)
    time.sleep(0.1)

    for handler in log_q.logger.handlers:
        print(f"[1] Handler (logger) {handler}")
    if log_q.listener is not None:
        for handler in log_q.listener.handlers:
            print(f"[1] Handler (queue) {handler}")
    for handler in log_q.handlers.items():
        print(f"[1] Handler (handlers) {handler}")

    log_q.set_log_level('stream_handler', LoggingLevel.ERROR)
    log_q.logger.warning('[QUEUE-B] [INVISIBLE] Warning test: %s', log_q.logger.name)
    log_q.logger.error('[QUEUE-B] [VISIBLE] Error test: %s', log_q.logger.name)

    for handler in log_q.logger.handlers:
        print(f"[2] Handler (logger) {handler}")
    if log_q.listener is not None:
        for handler in log_q.listener.handlers:
            print(f"[2] Handler (queue) {handler}")
    for handler in log_q.handlers.items():
        print(f"[2] Handler (handlers) {handler}")

    log_q.stop_listener()


if __name__ == "__main__":
    main()

# __END__
31  test-run/logging_handling/log_queue_legacy.py  Normal file
@@ -0,0 +1,31 @@
"""
Log logging_handling.log testing
"""

# import atexit
from pathlib import Path
from multiprocessing import Queue
# this is for testing only
from queue_logger.log_queue import QueueLogger


def main():
    """
    Log testing
    """
    script_path: Path = Path(__file__).resolve().parent

    log_queue: 'Queue[str]' = Queue()
    log_q_legacy = QueueLogger(
        log_file=script_path.joinpath('log', 'test_queue_legacy.log'),
        log_name="Test Log Queue",
        log_queue=log_queue
    )
    log_q_legacy.mlog.info('Log test: %s', 'Queue Legacy')
    # log_q.stop_listener()


if __name__ == "__main__":
    main()

# __END__
||||
96
test-run/logging_handling/queue_logger/log_queue.py
Normal file
96
test-run/logging_handling/queue_logger/log_queue.py
Normal file
@@ -0,0 +1,96 @@
|
||||
"""
|
||||
test queue logger interface
|
||||
NOTE: this has all moved to the default log interface
|
||||
"""
|
||||
|
||||
import logging
|
||||
import logging.handlers
|
||||
from pathlib import Path
|
||||
from multiprocessing import Queue
|
||||
|
||||
|
||||
class QueueLogger:
|
||||
"""
|
||||
Queue logger
|
||||
"""
|
||||
|
||||
def __init__(self, log_file: Path, log_name: str, log_queue: 'Queue[str] | None' = None):
|
||||
self.log_file = log_file
|
||||
self.log_name = log_name
|
||||
self.handlers = self.setup_logging()
|
||||
self.log_queue: 'Queue[str]' = log_queue if log_queue is not None else Queue()
|
||||
self.listener = logging.handlers.QueueListener(self.log_queue, *self.handlers)
|
||||
self.listener.start()
|
||||
|
||||
self.mlog: logging.Logger = self.main_log(log_name)
|
||||
|
||||
def __del__(self):
|
||||
self.mlog.info("[%s] ================================>", "END")
|
||||
self.listener.stop()
|
||||
|
||||
def setup_logging(self):
|
||||
"""
|
||||
setup basic logging
|
||||
"""
|
||||
|
||||
# Create formatters
|
||||
file_formatter = logging.Formatter(
|
||||
'%(asctime)s - %(name)s - %(levelname)s - [PID:%(process)d] [%(filename)s:%(lineno)d] - %(message)s'
|
||||
)
|
||||
|
||||
console_formatter = logging.Formatter(
|
||||
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
||||
)
|
||||
|
||||
# Create handlers
|
||||
file_handler = logging.FileHandler(self.log_file)
|
||||
file_handler.setFormatter(file_formatter)
|
||||
file_handler.setLevel(logging.DEBUG)
|
||||
|
||||
console_handler = logging.StreamHandler()
|
||||
console_handler.setFormatter(console_formatter)
|
||||
console_handler.setLevel(logging.DEBUG)
|
||||
|
||||
return [file_handler, console_handler]
|
||||
|
||||
def main_log(self, log_name: str) -> logging.Logger:
|
||||
"""
|
||||
main logger
|
||||
|
||||
Arguments:
|
||||
log_name {str} -- _description_
|
||||
|
||||
Returns:
|
||||
logging.Logger -- _description_
|
||||
"""
|
||||
mlog_handler = logging.handlers.QueueHandler(self.log_queue)
|
||||
mlog = logging.getLogger(f'{log_name}-MainProcess')
|
||||
mlog.addHandler(mlog_handler)
|
||||
mlog.setLevel(logging.DEBUG)
|
||||
return mlog
|
||||
|
||||
@staticmethod
|
||||
def init_worker_logging(log_queue: 'Queue[str]', log_name: str, ):
|
||||
"""
|
||||
Initialize logging for worker processes
|
||||
"""
|
||||
|
||||
# Create QueueHandler
|
||||
queue_handler = logging.handlers.QueueHandler(log_queue)
|
||||
|
||||
# Setup root logger for this process
|
||||
# NOTE: This must be EMPTY or new SINGLE NEW logger is created, we need one for EACH fork
|
||||
root_logger = logging.getLogger()
|
||||
root_logger.setLevel(logging.DEBUG)
|
||||
root_logger.handlers.clear()
|
||||
root_logger.addHandler(queue_handler)
|
||||
|
||||
root_logger.info('[LOGGER] Init log: %s - %s', log_queue, log_name)
|
||||
|
||||
return root_logger
|
||||
|
||||
def stop_listener(self):
|
||||
"""
|
||||
stop the listener
|
||||
"""
|
||||
self.listener.stop()
|
||||
|
Can't render this file because it contains an unexpected character in line 3 and column 165.

Can't render this file because it is too large.

Can't render this file because it contains an unexpected character in line 3 and column 165.
@@ -9,8 +9,9 @@ from random import randint
 import sys
 import io
 from pathlib import Path
-from corelibs.file_handling.progress import Progress
-from corelibs.string_handling.datetime_helpers import convert_timestamp, create_time
+from corelibs.script_handling.progress import Progress
+from corelibs.datetime_handling.datetime_helpers import create_time
+from corelibs.datetime_handling.timestamp_convert import convert_timestamp


 def main():
101  test-run/string_handling/string_helpers.py  Normal file
@@ -0,0 +1,101 @@
"""
Test string_handling/string_helpers
"""

import sys
from decimal import Decimal, getcontext
from textwrap import shorten
from corelibs.string_handling.string_helpers import shorten_string, format_number, prepare_url_slash
from corelibs.string_handling.text_colors import Colors


def __sh_shorten_string():
    string = "hello"
    length = 3
    placeholder = " [very long placeholder]"
    try:
        result = shorten_string(string, length, placeholder=placeholder)
        print(f"IN: {string} -> {result}")
    except ValueError as e:
        print(f"{Colors.red}Failed: {Colors.bold}{e}{Colors.end}")
    try:
        result = shorten(string, width=length, placeholder=placeholder)
        print(f"IN: {string} -> {result}")
    except ValueError as e:
        print(f"Failed: {e}")


def __sh_format_number():
    print(f"Max int: {sys.maxsize}")
    print(f"Max float: {sys.float_info.max}")
    number = 1234.56
    precision = 0
    result = format_number(number, precision)
    print(f"Format {number} ({precision}) -> {result}")
    number = 1234.56
    precision = 100
    result = format_number(number, precision)
    print(f"Format {number} ({precision}) -> {result}")
    number = 123400000000000001.56
    if number >= sys.maxsize:
        print("INT Number too big")
    if number >= sys.float_info.max:
        print("FLOAT Number too big")
    precision = 5
    result = format_number(number, precision)
    print(f"Format {number} ({precision}) -> {result}")

    precision = 100
    getcontext().prec = precision
    number = Decimal(str(1234.56))
    result = f"{number:,.100f}"
    print(f"Format {number} ({precision}) -> {result}")


def __sh_colors():
    for color in [
        "black",
        "red",
        "green",
        "yellow",
        "blue",
        "magenta",
        "cyan",
        "white",
    ]:
        for change in ['', '_bold', '_bright']:
            _color = f"{color}{change}"
            print(f"Color: {getattr(Colors, _color)}{_color}{Colors.end}")

    print(f"Underline: {Colors.underline}UNDERLINE{Colors.reset}")
    print(f"Bold: {Colors.bold}BOLD{Colors.reset}")
    print(f"Underline/Yellow: {Colors.underline}{Colors.yellow}UNDERLINE YELLOW{Colors.reset}")
    print(f"Underline/Yellow/Bold: {Colors.underline}{Colors.bold}{Colors.yellow}UNDERLINE YELLOW BOLD{Colors.reset}")


def __prepare_url_slash():
    urls = [
        "api/v1/resource",
        "/api/v1/resource",
        "///api//v1//resource//",
        "api//v1/resource/",
    ]
    for url in urls:
        prepared = prepare_url_slash(url)
        print(f"IN: {url} -> OUT: {prepared}")


def main():
    """
    Test: corelibs.string_handling.string_helpers
    """
    __sh_shorten_string()
    __sh_format_number()
    __sh_colors()
    __prepare_url_slash()


if __name__ == "__main__":
    main()

# __END__
33  test-run/timestamp_strings/timestamp_strings.py  Normal file
@@ -0,0 +1,33 @@
#!/usr/bin/env -S uv run --script

"""
Test for double byte format
"""

from zoneinfo import ZoneInfo
from corelibs.datetime_handling.timestamp_strings import TimestampStrings


def main():
    """test"""
    ts = TimestampStrings()
    print(f"TS: {ts.timestamp_now}")

    try:
        ts = TimestampStrings("invalid")
    except ValueError as e:
        print(f"Value error: {e}")

    ts = TimestampStrings("Europe/Vienna")
    print(f"TZ: {ts.time_zone} -> TS: {ts.timestamp_now_tz}")
    ts = TimestampStrings(ZoneInfo("Europe/Vienna"))
    print(f"TZ: {ts.time_zone} -> TS: {ts.timestamp_now_tz}")
    custom_tz = 'Europe/Paris'
    ts = TimestampStrings(time_zone=custom_tz)
    print(f"TZ: {ts.time_zone} -> TS: {ts.timestamp_now_tz}")


if __name__ == "__main__":
    main()

# __END__
29  test-run/var_handling/enum_base.py  Normal file
@@ -0,0 +1,29 @@
#!/usr/bin/env python3

"""
Enum handling
"""

from corelibs.var_handling.enum_base import EnumBase


class TestBlock(EnumBase):
    """Test block enum"""
    BLOCK_A = "block_a"
    HAS_NUM = 5


def main() -> None:
    """
    Comment
    """

    print(f"BLOCK A: {TestBlock.from_any('BLOCK_A')}")
    print(f"HAS NUM: {TestBlock.from_any(5)}")
    print(f"DIRECT BLOCK: {TestBlock.BLOCK_A.name} -> {TestBlock.BLOCK_A.value}")


if __name__ == "__main__":
    main()

# __END__
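EnumBase.from_any evidently resolves either a member name ('BLOCK_A') or a member value (5). A plain-Enum sketch of that lookup, assumed behaviour rather than the corelibs class:

from enum import Enum

class EnumBaseSketch(Enum):
    """Sketch: resolve a member from its name or its value."""
    @classmethod
    def from_any(cls, key):
        if isinstance(key, str) and key in cls.__members__:
            return cls.__members__[key]     # name lookup, e.g. 'BLOCK_A'
        return cls(key)                     # value lookup; raises ValueError if unknown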
0  tests/integration/__init__.py  Normal file
0  tests/integration/fixtures/__init__.py  Normal file
0  tests/unit/__init__.py  Normal file
1  tests/unit/check_handling/__init__.py  Normal file
@@ -0,0 +1 @@
"""Unit tests for check_handling module."""
336
tests/unit/check_handling/test_regex_constants.py
Normal file
336
tests/unit/check_handling/test_regex_constants.py
Normal file
@@ -0,0 +1,336 @@
|
||||
"""
|
||||
Unit tests for regex_constants module.
|
||||
|
||||
Tests all regex patterns defined in the check_handling.regex_constants module.
|
||||
"""
|
||||
|
||||
import re
|
||||
import pytest
|
||||
from corelibs.check_handling.regex_constants import (
|
||||
compile_re,
|
||||
EMAIL_BASIC_REGEX,
|
||||
DOMAIN_WITH_LOCALHOST_REGEX,
|
||||
DOMAIN_WITH_LOCALHOST_PORT_REGEX,
|
||||
DOMAIN_REGEX,
|
||||
)
|
||||
|
||||
|
||||
class TestCompileRe:
|
||||
"""Test cases for the compile_re function."""
|
||||
|
||||
def test_compile_re_returns_pattern(self) -> None:
|
||||
"""Test that compile_re returns a compiled regex Pattern object."""
|
||||
pattern = compile_re(r"test")
|
||||
assert isinstance(pattern, re.Pattern)
|
||||
|
||||
def test_compile_re_with_verbose_flag(self) -> None:
|
||||
"""Test that compile_re compiles with VERBOSE flag."""
|
||||
# Verbose mode allows whitespace and comments in regex
|
||||
verbose_regex = r"""
|
||||
\d+ # digits
|
||||
\s+ # whitespace
|
||||
"""
|
||||
pattern = compile_re(verbose_regex)
|
||||
assert pattern.match("123 ")
|
||||
assert not pattern.match("abc")
|
||||
|
||||
def test_compile_re_simple_pattern(self) -> None:
|
||||
"""Test compile_re with a simple pattern."""
|
||||
pattern = compile_re(r"^\d{3}$")
|
||||
assert pattern.match("123")
|
||||
assert not pattern.match("12")
|
||||
assert not pattern.match("1234")
|
||||
|
||||
|
||||
class TestEmailBasicRegex:
|
||||
"""Test cases for EMAIL_BASIC_REGEX pattern."""
|
||||
|
||||
@pytest.fixture
|
||||
def email_pattern(self) -> re.Pattern[str]:
|
||||
"""Fixture that returns compiled email regex pattern."""
|
||||
return compile_re(EMAIL_BASIC_REGEX)
|
||||
|
||||
@pytest.mark.parametrize("valid_email", [
|
||||
"user@example.com",
|
||||
"test.user@example.com",
|
||||
"user+tag@example.co.uk",
|
||||
"first.last@subdomain.example.com",
|
||||
"user123@test-domain.com",
|
||||
"a@example.com",
|
||||
"user_name@example.com",
|
||||
"user-name@example.com",
|
||||
"user@sub.domain.example.com",
|
||||
"test!#$%&'*+-/=?^_`{|}~@example.com",
|
||||
"1234567890@example.com",
|
||||
"user@example-domain.com",
|
||||
"user@domain.co",
|
||||
# Regex allows these (even if not strictly RFC compliant):
|
||||
"user.@example.com", # ends with dot before @
|
||||
"user..name@example.com", # consecutive dots in local part
|
||||
])
|
||||
def test_valid_emails(
|
||||
self, email_pattern: re.Pattern[str], valid_email: str
|
||||
) -> None:
|
||||
"""Test that valid email addresses match the pattern."""
|
||||
assert email_pattern.match(valid_email), (
|
||||
f"Failed to match valid email: {valid_email}"
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize("invalid_email", [
|
||||
"", # empty string
|
||||
"@example.com", # missing local part
|
||||
"user@", # missing domain
|
||||
"user", # no @ symbol
|
||||
"user@.com", # domain starts with dot
|
||||
"user@domain", # no TLD
|
||||
"user @example.com", # space in local part
|
||||
"user@exam ple.com", # space in domain
|
||||
".user@example.com", # starts with dot
|
||||
"user@-example.com", # domain starts with hyphen
|
||||
"user@example-.com", # domain part ends with hyphen
|
||||
"user@example.c", # TLD too short (1 char)
|
||||
"user@example.toolong", # TLD too long (>6 chars)
|
||||
"user@@example.com", # double @
|
||||
"user@example@com", # multiple @
|
||||
"user@.example.com", # domain starts with dot
|
||||
"user@example.com.", # ends with dot
|
||||
"user@123.456.789.012", # numeric TLD not allowed
|
||||
])
|
||||
def test_invalid_emails(
|
||||
self, email_pattern: re.Pattern[str], invalid_email: str
|
||||
) -> None:
|
||||
"""Test that invalid email addresses do not match the pattern."""
|
||||
assert not email_pattern.match(invalid_email), (
|
||||
f"Incorrectly matched invalid email: {invalid_email}"
|
||||
)
|
||||
|
||||
def test_email_max_local_part_length(
|
||||
self, email_pattern: re.Pattern[str]
|
||||
) -> None:
|
||||
"""Test email with maximum local part length (64 characters)."""
|
||||
# Local part can be up to 64 chars (first char + 63 more)
|
||||
local_part = "a" * 64
|
||||
email = f"{local_part}@example.com"
|
||||
assert email_pattern.match(email)
|
||||
|
||||
def test_email_exceeds_local_part_length(
|
||||
self, email_pattern: re.Pattern[str]
|
||||
) -> None:
|
||||
"""Test email exceeding maximum local part length."""
|
||||
# 65 characters should not match
|
||||
local_part = "a" * 65
|
||||
email = f"{local_part}@example.com"
|
||||
assert not email_pattern.match(email)
|
||||
|
||||
|
||||
class TestDomainWithLocalhostRegex:
|
||||
"""Test cases for DOMAIN_WITH_LOCALHOST_REGEX pattern."""
|
||||
|
||||
@pytest.fixture
|
||||
def domain_localhost_pattern(self) -> re.Pattern[str]:
|
||||
"""Fixture that returns compiled domain with localhost regex pattern."""
|
||||
return compile_re(DOMAIN_WITH_LOCALHOST_REGEX)
|
||||
|
||||
@pytest.mark.parametrize("valid_domain", [
|
||||
"localhost",
|
||||
"example.com",
|
||||
"subdomain.example.com",
|
||||
"sub.domain.example.com",
|
||||
"test-domain.com",
|
||||
"example.co.uk",
|
||||
"a.com",
|
||||
"test123.example.com",
|
||||
"my-site.example.org",
|
||||
"multi.level.subdomain.example.com",
|
||||
])
|
||||
def test_valid_domains(
|
||||
self, domain_localhost_pattern: re.Pattern[str], valid_domain: str
|
||||
) -> None:
|
||||
"""Test that valid domains (including localhost) match the pattern."""
|
||||
assert domain_localhost_pattern.match(valid_domain), (
|
||||
f"Failed to match valid domain: {valid_domain}"
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize("invalid_domain", [
|
||||
"", # empty string
|
||||
"example", # no TLD
|
||||
"-example.com", # starts with hyphen
|
||||
"example-.com", # ends with hyphen
|
||||
".example.com", # starts with dot
|
||||
"example.com.", # ends with dot
|
||||
"example..com", # consecutive dots
|
||||
"exam ple.com", # space in domain
|
||||
"example.c", # TLD too short
|
||||
"localhost:8080", # port not allowed in this pattern
|
||||
"example.com:8080", # port not allowed in this pattern
|
||||
"@example.com", # invalid character
|
||||
"example@com", # invalid character
|
||||
])
|
||||
def test_invalid_domains(
|
||||
self, domain_localhost_pattern: re.Pattern[str], invalid_domain: str
|
||||
) -> None:
|
||||
"""Test that invalid domains do not match the pattern."""
|
||||
assert not domain_localhost_pattern.match(invalid_domain), (
|
||||
f"Incorrectly matched invalid domain: {invalid_domain}"
|
||||
)
|
||||
|
||||
|
||||
class TestDomainWithLocalhostPortRegex:
    """Test cases for DOMAIN_WITH_LOCALHOST_PORT_REGEX pattern."""

    @pytest.fixture
    def domain_localhost_port_pattern(self) -> re.Pattern[str]:
        """Fixture that returns compiled domain and localhost with port pattern."""
        return compile_re(DOMAIN_WITH_LOCALHOST_PORT_REGEX)

    @pytest.mark.parametrize("valid_domain", [
        "localhost",
        "localhost:8080",
        "localhost:3000",
        "localhost:80",
        "localhost:443",
        "localhost:65535",
        "example.com",
        "example.com:8080",
        "subdomain.example.com:3000",
        "test-domain.com:443",
        "example.co.uk",
        "example.co.uk:8000",
        "a.com:1",
        "multi.level.subdomain.example.com:9999",
    ])
    def test_valid_domains_with_port(
        self, domain_localhost_port_pattern: re.Pattern[str], valid_domain: str
    ) -> None:
        """Test that valid domains with optional ports match the pattern."""
        assert domain_localhost_port_pattern.match(valid_domain), (
            f"Failed to match valid domain: {valid_domain}"
        )

    @pytest.mark.parametrize("invalid_domain", [
        "",  # empty string
        "example",  # no TLD
        "-example.com",  # starts with hyphen
        "example-.com",  # ends with hyphen
        ".example.com",  # starts with dot
        "example.com.",  # ends with dot
        "localhost:",  # port without number
        "example.com:",  # port without number
        "example.com:abc",  # non-numeric port
        "example.com: 8080",  # space before port
        "example.com:80 80",  # space in port
        "exam ple.com",  # space in domain
        "localhost :8080",  # space before colon
    ])
    def test_invalid_domains_with_port(
        self,
        domain_localhost_port_pattern: re.Pattern[str],
        invalid_domain: str,
    ) -> None:
        """Test that invalid domains do not match the pattern."""
        assert not domain_localhost_port_pattern.match(invalid_domain), (
            f"Incorrectly matched invalid domain: {invalid_domain}"
        )

    def test_large_port_number(
        self, domain_localhost_port_pattern: re.Pattern[str]
    ) -> None:
        """Test domain with large port numbers."""
        assert domain_localhost_port_pattern.match("example.com:65535")
        # Regex doesn't validate port range
        assert domain_localhost_port_pattern.match("example.com:99999")


class TestDomainRegex:
    """Test cases for DOMAIN_REGEX pattern (no localhost)."""

    @pytest.fixture
    def domain_pattern(self) -> re.Pattern[str]:
        """Fixture that returns compiled domain regex pattern."""
        return compile_re(DOMAIN_REGEX)

    @pytest.mark.parametrize("valid_domain", [
        "example.com",
        "subdomain.example.com",
        "sub.domain.example.com",
        "test-domain.com",
        "example.co.uk",
        "a.com",
        "test123.example.com",
        "my-site.example.org",
        "multi.level.subdomain.example.com",
        "example.co",
    ])
    def test_valid_domains_no_localhost(
        self, domain_pattern: re.Pattern[str], valid_domain: str
    ) -> None:
        """Test that valid domains match the pattern."""
        assert domain_pattern.match(valid_domain), (
            f"Failed to match valid domain: {valid_domain}"
        )

    @pytest.mark.parametrize("invalid_domain", [
        "",  # empty string
        "localhost",  # localhost not allowed
        "example",  # no TLD
        "-example.com",  # starts with hyphen
        "example-.com",  # ends with hyphen
        ".example.com",  # starts with dot
        "example.com.",  # ends with dot
        "example..com",  # consecutive dots
        "exam ple.com",  # space in domain
        "example.c",  # TLD too short
        "example.com:8080",  # port not allowed
        "@example.com",  # invalid character
        "example@com",  # invalid character
    ])
    def test_invalid_domains_no_localhost(
        self, domain_pattern: re.Pattern[str], invalid_domain: str
    ) -> None:
        """Test that invalid domains do not match the pattern."""
        assert not domain_pattern.match(invalid_domain), (
            f"Incorrectly matched invalid domain: {invalid_domain}"
        )

    def test_localhost_not_allowed(
        self, domain_pattern: re.Pattern[str]
    ) -> None:
        """Test that localhost is explicitly not allowed in DOMAIN_REGEX."""
        assert not domain_pattern.match("localhost")


class TestRegexPatternConsistency:
    """Test cases for consistency across regex patterns."""

    def test_all_patterns_compile(self) -> None:
        """Test that all regex patterns can be compiled without errors."""
        patterns = [
            EMAIL_BASIC_REGEX,
            DOMAIN_WITH_LOCALHOST_REGEX,
            DOMAIN_WITH_LOCALHOST_PORT_REGEX,
            DOMAIN_REGEX,
        ]
        for pattern in patterns:
            compiled = compile_re(pattern)
            assert isinstance(compiled, re.Pattern)

    def test_domain_patterns_are_strings(self) -> None:
        """Test that all regex constants are strings."""
        assert isinstance(EMAIL_BASIC_REGEX, str)
        assert isinstance(DOMAIN_WITH_LOCALHOST_REGEX, str)
        assert isinstance(DOMAIN_WITH_LOCALHOST_PORT_REGEX, str)
        assert isinstance(DOMAIN_REGEX, str)

    def test_domain_patterns_hierarchy(self) -> None:
        """Test that domain patterns follow expected hierarchy."""
        # DOMAIN_WITH_LOCALHOST_PORT_REGEX should accept everything that
        # DOMAIN_WITH_LOCALHOST_REGEX accepts.
        domain_localhost = compile_re(DOMAIN_WITH_LOCALHOST_REGEX)
        domain_localhost_port = compile_re(DOMAIN_WITH_LOCALHOST_PORT_REGEX)

        test_cases = ["example.com", "subdomain.example.com", "localhost"]
        for test_case in test_cases:
            if domain_localhost.match(test_case):
                assert domain_localhost_port.match(test_case), (
                    f"{test_case} should match both patterns"
                )

708 tests/unit/config_handling/test_settings_loader.py Normal file
@@ -0,0 +1,708 @@
"""
Unit tests for SettingsLoader class
"""

import configparser
from pathlib import Path
from unittest.mock import Mock
import pytest
from pytest import CaptureFixture
from corelibs.config_handling.settings_loader import SettingsLoader
from corelibs.logging_handling.log import Log


class TestSettingsLoaderInit:
    """Test cases for SettingsLoader initialization"""

    def test_init_with_valid_config_file(self, tmp_path: Path):
        """Test initialization with a valid config file"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[Section]\nkey=value\n")

        loader = SettingsLoader(
            args={},
            config_file=config_file,
            log=None,
            always_print=False
        )

        assert loader.args == {}
        assert loader.config_file == config_file
        assert loader.log is None
        assert loader.always_print is False
        assert loader.config_parser is not None
        assert isinstance(loader.config_parser, configparser.ConfigParser)

    def test_init_with_missing_config_file(self, tmp_path: Path):
        """Test initialization with missing config file"""
        config_file = tmp_path / "missing.ini"

        loader = SettingsLoader(
            args={},
            config_file=config_file,
            log=None,
            always_print=False
        )

        assert loader.config_parser is None

    def test_init_with_invalid_config_folder(self):
        """Test initialization with invalid config folder path"""
        config_file = Path("/nonexistent/path/test.ini")

        with pytest.raises(ValueError, match="Cannot find the config folder"):
            SettingsLoader(
                args={},
                config_file=config_file,
                log=None,
                always_print=False
            )

    def test_init_with_log(self, tmp_path: Path):
        """Test initialization with Log object"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[Section]\nkey=value\n")
        mock_log = Mock(spec=Log)

        loader = SettingsLoader(
            args={"test": "value"},
            config_file=config_file,
            log=mock_log,
            always_print=True
        )

        assert loader.log == mock_log
        assert loader.always_print is True


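# The rule strings passed as the second argument to load_settings() form a
# small validation DSL, exercised by the tests below: "mandatory:yes",
# "split:<char>", "convert:int|float|bool", "empty:<default>",
# "matching:a|b|c", "in:<other_field>", "length:N" / "length:N-M" /
# "length:N-" / "length:-M", "range:N-M" / "range:N-", and "check:<type>"
# such as "check:int" or "check:string.email.basic".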
class TestLoadSettings:
    """Test cases for load_settings method"""

    def test_load_settings_basic(self, tmp_path: Path):
        """Test loading basic settings without validation"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nkey1=value1\nkey2=value2\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings("TestSection")

        assert result == {"key1": "value1", "key2": "value2"}

    def test_load_settings_with_missing_section(self, tmp_path: Path):
        """Test loading settings with missing section"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[OtherSection]\nkey=value\n")

        loader = SettingsLoader(args={}, config_file=config_file)

        with pytest.raises(ValueError, match="Cannot read \\[MissingSection\\]"):
            loader.load_settings("MissingSection")

    def test_load_settings_allow_not_exist(self, tmp_path: Path):
        """Test loading settings with allow_not_exist flag"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[OtherSection]\nkey=value\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings("MissingSection", allow_not_exist=True)

        assert result == {}

    def test_load_settings_mandatory_field_present(self, tmp_path: Path):
        """Test mandatory field validation when field is present"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nrequired_field=value\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "TestSection",
            {"required_field": ["mandatory:yes"]}
        )

        assert result["required_field"] == "value"

    def test_load_settings_mandatory_field_missing(self, tmp_path: Path):
        """Test mandatory field validation when field is missing"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nother_field=value\n")

        loader = SettingsLoader(args={}, config_file=config_file)

        with pytest.raises(ValueError, match="Missing or incorrect settings data"):
            loader.load_settings(
                "TestSection",
                {"required_field": ["mandatory:yes"]}
            )

    def test_load_settings_mandatory_field_empty(self, tmp_path: Path):
        """Test mandatory field validation when field is empty"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nrequired_field=\n")

        loader = SettingsLoader(args={}, config_file=config_file)

        with pytest.raises(ValueError, match="Missing or incorrect settings data"):
            loader.load_settings(
                "TestSection",
                {"required_field": ["mandatory:yes"]}
            )

    def test_load_settings_with_split(self, tmp_path: Path):
        """Test splitting values into lists"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nlist_field=a,b,c,d\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "TestSection",
            {"list_field": ["split:,"]}
        )

        assert result["list_field"] == ["a", "b", "c", "d"]

    def test_load_settings_with_custom_split_char(self, tmp_path: Path):
        """Test splitting with custom delimiter"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nlist_field=a|b|c|d\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "TestSection",
            {"list_field": ["split:|"]}
        )

        assert result["list_field"] == ["a", "b", "c", "d"]

    def test_load_settings_split_removes_spaces(self, tmp_path: Path):
        """Test that split removes spaces from values"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nlist_field=a, b , c , d\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "TestSection",
            {"list_field": ["split:,"]}
        )

        assert result["list_field"] == ["a", "b", "c", "d"]

    def test_load_settings_empty_split_char_fallback(self, tmp_path: Path, capsys: CaptureFixture[str]):
        """Test fallback to default split char when empty"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nlist_field=a,b,c\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "TestSection",
            {"list_field": ["split:"]}
        )

        assert result["list_field"] == ["a", "b", "c"]
        captured = capsys.readouterr()
        assert "fallback to:" in captured.out

    def test_load_settings_convert_to_int(self, tmp_path: Path):
        """Test converting values to int"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nnumber=123\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "TestSection",
            {"number": ["convert:int"]}
        )

        assert result["number"] == 123
        assert isinstance(result["number"], int)

    def test_load_settings_convert_to_float(self, tmp_path: Path):
        """Test converting values to float"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nnumber=123.45\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "TestSection",
            {"number": ["convert:float"]}
        )

        assert result["number"] == 123.45
        assert isinstance(result["number"], float)

    def test_load_settings_convert_to_bool_true(self, tmp_path: Path):
        """Test converting values to boolean True"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nflag1=true\nflag2=True\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "TestSection",
            {"flag1": ["convert:bool"], "flag2": ["convert:bool"]}
        )

        assert result["flag1"] is True
        assert result["flag2"] is True

    def test_load_settings_convert_to_bool_false(self, tmp_path: Path):
        """Test converting values to boolean False"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nflag1=false\nflag2=False\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "TestSection",
            {"flag1": ["convert:bool"], "flag2": ["convert:bool"]}
        )

        assert result["flag1"] is False
        assert result["flag2"] is False

    def test_load_settings_convert_invalid_type(self, tmp_path: Path):
        """Test converting with invalid type raises error"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nvalue=test\n")

        loader = SettingsLoader(args={}, config_file=config_file)

        with pytest.raises(ValueError, match="convert type is invalid"):
            loader.load_settings(
                "TestSection",
                {"value": ["convert:invalid"]}
            )

    def test_load_settings_empty_set_to_none(self, tmp_path: Path):
        """Test setting empty values to None"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nother=value\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "TestSection",
            {"field": ["empty:"]}
        )

        assert result["field"] is None

    def test_load_settings_empty_set_to_custom_value(self, tmp_path: Path):
        """Test setting empty values to custom value"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nother=value\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "TestSection",
            {"field": ["empty:default"]}
        )

        assert result["field"] == "default"

    def test_load_settings_matching_valid(self, tmp_path: Path):
        """Test matching validation with valid value"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nmode=production\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "TestSection",
            {"mode": ["matching:development|staging|production"]}
        )

        assert result["mode"] == "production"

    def test_load_settings_matching_invalid(self, tmp_path: Path):
        """Test matching validation with invalid value"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nmode=invalid\n")

        loader = SettingsLoader(args={}, config_file=config_file)

        with pytest.raises(ValueError, match="Missing or incorrect settings data"):
            loader.load_settings(
                "TestSection",
                {"mode": ["matching:development|staging|production"]}
            )

    def test_load_settings_in_valid(self, tmp_path: Path):
        """Test 'in' validation with valid value"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nallowed=a,b,c\nvalue=b\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "TestSection",
            {
                "allowed": ["split:,"],
                "value": ["in:allowed"]
            }
        )

        assert result["value"] == "b"

    def test_load_settings_in_invalid(self, tmp_path: Path):
        """Test 'in' validation with invalid value"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nallowed=a,b,c\nvalue=d\n")

        loader = SettingsLoader(args={}, config_file=config_file)

        with pytest.raises(ValueError, match="Missing or incorrect settings data"):
            loader.load_settings(
                "TestSection",
                {
                    "allowed": ["split:,"],
                    "value": ["in:allowed"]
                }
            )

    def test_load_settings_in_missing_target(self, tmp_path: Path):
        """Test 'in' validation with missing target"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nvalue=a\n")

        loader = SettingsLoader(args={}, config_file=config_file)

        with pytest.raises(ValueError, match="Missing or incorrect settings data"):
            loader.load_settings(
                "TestSection",
                {"value": ["in:missing_target"]}
            )

    def test_load_settings_length_exact(self, tmp_path: Path):
        """Test length validation with exact match"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nvalue=test\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "TestSection",
            {"value": ["length:4"]}
        )

        assert result["value"] == "test"

    def test_load_settings_length_exact_invalid(self, tmp_path: Path):
        """Test length validation with exact match failure"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nvalue=test\n")

        loader = SettingsLoader(args={}, config_file=config_file)

        with pytest.raises(ValueError, match="Missing or incorrect settings data"):
            loader.load_settings(
                "TestSection",
                {"value": ["length:5"]}
            )

    def test_load_settings_length_range(self, tmp_path: Path):
        """Test length validation with range"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nvalue=testing\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "TestSection",
            {"value": ["length:5-10"]}
        )

        assert result["value"] == "testing"

    def test_load_settings_length_min_only(self, tmp_path: Path):
        """Test length validation with minimum only"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nvalue=testing\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "TestSection",
            {"value": ["length:5-"]}
        )

        assert result["value"] == "testing"

    def test_load_settings_length_max_only(self, tmp_path: Path):
        """Test length validation with maximum only"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nvalue=test\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "TestSection",
            {"value": ["length:-10"]}
        )

        assert result["value"] == "test"

    def test_load_settings_range_valid(self, tmp_path: Path):
        """Test range validation with valid value"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nnumber=25\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "TestSection",
            {"number": ["range:10-50"]}
        )

        assert result["number"] == "25"

    def test_load_settings_range_invalid(self, tmp_path: Path):
        """Test range validation with invalid value"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nnumber=100\n")

        loader = SettingsLoader(args={}, config_file=config_file)

        with pytest.raises(ValueError, match="Missing or incorrect settings data"):
            loader.load_settings(
                "TestSection",
                {"number": ["range:10-50"]}
            )

    def test_load_settings_check_int_valid(self, tmp_path: Path):
        """Test check:int with valid integer"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nnumber=12345\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "TestSection",
            {"number": ["check:int"]}
        )

        assert result["number"] == "12345"

    def test_load_settings_check_int_cleanup(self, tmp_path: Path):
        """Test check:int with cleanup"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nnumber=12a34b5\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "TestSection",
            {"number": ["check:int"]}
        )

        assert result["number"] == "12345"

    def test_load_settings_check_email_valid(self, tmp_path: Path):
        """Test check:string.email.basic with valid email"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nemail=test@example.com\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "TestSection",
            {"email": ["check:string.email.basic"]}
        )

        assert result["email"] == "test@example.com"

    def test_load_settings_check_email_invalid(self, tmp_path: Path):
        """Test check:string.email.basic with invalid email"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nemail=not-an-email\n")

        loader = SettingsLoader(args={}, config_file=config_file)

        with pytest.raises(ValueError, match="Missing or incorrect settings data"):
            loader.load_settings(
                "TestSection",
                {"email": ["check:string.email.basic"]}
            )

    def test_load_settings_args_override(self, tmp_path: Path, capsys: CaptureFixture[str]):
        """Test command line arguments override config values"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nvalue=config_value\n")

        loader = SettingsLoader(
            args={"value": "arg_value"},
            config_file=config_file
        )
        result = loader.load_settings(
            "TestSection",
            {"value": []}
        )

        assert result["value"] == "arg_value"
        captured = capsys.readouterr()
        assert "Command line option override" in captured.out

    def test_load_settings_no_config_file_with_args(self, tmp_path: Path):
        """Test loading settings without config file but with mandatory args"""
        config_file = tmp_path / "missing.ini"

        loader = SettingsLoader(
            args={"required": "value"},
            config_file=config_file
        )
        result = loader.load_settings(
            "TestSection",
            {"required": ["mandatory:yes"]}
        )

        assert result["required"] == "value"

    def test_load_settings_no_config_file_missing_args(self, tmp_path: Path):
        """Test loading settings without config file and missing args"""
        config_file = tmp_path / "missing.ini"

        loader = SettingsLoader(args={}, config_file=config_file)

        with pytest.raises(ValueError, match="Cannot find file"):
            loader.load_settings(
                "TestSection",
                {"required": ["mandatory:yes"]}
            )

    def test_load_settings_check_list_with_split(self, tmp_path: Path):
        """Test check validation with list values"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nlist=abc,def,ghi\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "TestSection",
            {"list": ["split:,", "check:string.alphanumeric"]}
        )

        assert result["list"] == ["abc", "def", "ghi"]

    def test_load_settings_check_list_cleanup(self, tmp_path: Path):
        """Test check validation cleans up list values"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nlist=ab-c,de_f,gh!i\n")

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "TestSection",
            {"list": ["split:,", "check:string.alphanumeric"]}
        )

        assert result["list"] == ["abc", "def", "ghi"]

    def test_load_settings_invalid_check_type(self, tmp_path: Path):
        """Test with invalid check type"""
        config_file = tmp_path / "test.ini"
        config_file.write_text("[TestSection]\nvalue=test\n")

        loader = SettingsLoader(args={}, config_file=config_file)

        with pytest.raises(ValueError, match="Cannot get SettingsLoaderCheck.CHECK_SETTINGS"):
            loader.load_settings(
                "TestSection",
                {"value": ["check:invalid.check.type"]}
            )


class TestComplexScenarios:
    """Test cases for complex real-world scenarios"""

    def test_complex_validation_scenario(self, tmp_path: Path):
        """Test complex scenario with multiple validations"""
        config_file = tmp_path / "test.ini"
        config_file.write_text(
            "[Production]\n"
            "environment=production\n"
            "allowed_envs=development,staging,production\n"
            "port=8080\n"
            "host=example.com\n"
            "timeout=30\n"
            "debug=false\n"
            "features=auth,logging,monitoring\n"
        )

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "Production",
            {
                "environment": [
                    "mandatory:yes",
                    "matching:development|staging|production",
                    "in:allowed_envs"
                ],
                "allowed_envs": ["split:,"],
                "port": ["mandatory:yes", "convert:int", "range:1-65535"],
                "host": ["mandatory:yes"],
                "timeout": ["convert:int", "range:1-"],
                "debug": ["convert:bool"],
                "features": ["split:,", "check:string.alphanumeric"],
            }
        )

        assert result["environment"] == "production"
        assert result["allowed_envs"] == ["development", "staging", "production"]
        assert result["port"] == 8080
        assert isinstance(result["port"], int)
        assert result["host"] == "example.com"
        assert result["timeout"] == 30
        assert result["debug"] is False
        assert result["features"] == ["auth", "logging", "monitoring"]

    def test_email_list_validation(self, tmp_path: Path):
        """Test email list with validation"""
        config_file = tmp_path / "test.ini"
        config_file.write_text(
            "[EmailConfig]\n"
            "emails=test@example.com,admin@domain.org,user+tag@site.co.uk\n"
        )

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "EmailConfig",
            {"emails": ["split:,", "mandatory:yes", "check:string.email.basic"]}
        )

        assert len(result["emails"]) == 3
        assert "test@example.com" in result["emails"]

    def test_mixed_args_and_config(self, tmp_path: Path):
        """Test mixing command line args and config file"""
        config_file = tmp_path / "test.ini"
        config_file.write_text(
            "[Settings]\n"
            "value1=config_value1\n"
            "value2=config_value2\n"
        )

        loader = SettingsLoader(
            args={"value1": "arg_value1"},
            config_file=config_file
        )
        result = loader.load_settings(
            "Settings",
            {"value1": [], "value2": []}
        )

        assert result["value1"] == "arg_value1"  # Overridden by arg
        assert result["value2"] == "config_value2"  # From config

    def test_multiple_check_types(self, tmp_path: Path):
        """Test multiple different check types"""
        config_file = tmp_path / "test.ini"
        config_file.write_text(
            "[Checks]\n"
            "numbers=123,456,789\n"
            "alphas=abc,def,ghi\n"
            "emails=test@example.com\n"
            "date=2025-01-15\n"
        )

        loader = SettingsLoader(args={}, config_file=config_file)
        result = loader.load_settings(
            "Checks",
            {
                "numbers": ["split:,", "check:int"],
                "alphas": ["split:,", "check:string.alphanumeric"],
                "emails": ["check:string.email.basic"],
                "date": ["check:string.date"],
            }
        )

        assert result["numbers"] == ["123", "456", "789"]
        assert result["alphas"] == ["abc", "def", "ghi"]
        assert result["emails"] == "test@example.com"
        assert result["date"] == "2025-01-15"


# __END__

3 tests/unit/datetime_handling/__init__.py Normal file
@@ -0,0 +1,3 @@
"""
Unit tests for datetime_handling module
"""

205 tests/unit/datetime_handling/test_convert_to_seconds.py Normal file
@@ -0,0 +1,205 @@
"""
Unit tests for convert_to_seconds function from the timestamp_convert module.
"""

import pytest
from corelibs.datetime_handling.timestamp_convert import convert_to_seconds, TimeParseError, TimeUnitError


class TestConvertToSeconds:
    """Test class for convert_to_seconds function."""

    def test_numeric_input_int(self):
        """Test with integer input."""
        assert convert_to_seconds(42) == 42
        assert convert_to_seconds(0) == 0
        assert convert_to_seconds(-5) == -5

    def test_numeric_input_float(self):
        """Test with float input."""
        assert convert_to_seconds(42.7) == 43  # rounds to 43
        assert convert_to_seconds(42.3) == 42  # rounds to 42
        assert convert_to_seconds(42.5) == 42  # rounds to 42 (banker's rounding)
        assert convert_to_seconds(0.0) == 0
        assert convert_to_seconds(-5.7) == -6

    def test_numeric_string_input(self):
        """Test with numeric string input."""
        assert convert_to_seconds("42") == 42
        assert convert_to_seconds("42.7") == 43
        assert convert_to_seconds("42.3") == 42
        assert convert_to_seconds("0") == 0
        assert convert_to_seconds("-5.7") == -6

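    # Unit factors as the assertions below observe them: s=1, m=60, h=3600,
    # d=86400, Y=31536000 (365 days), and M=2592000 * 12 (see the bug note
    # in test_single_unit_months).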
    def test_single_unit_seconds(self):
        """Test with seconds unit."""
        assert convert_to_seconds("30s") == 30
        assert convert_to_seconds("1s") == 1
        assert convert_to_seconds("0s") == 0

    def test_single_unit_minutes(self):
        """Test with minutes unit."""
        assert convert_to_seconds("5m") == 300  # 5 * 60
        assert convert_to_seconds("1m") == 60
        assert convert_to_seconds("0m") == 0

    def test_single_unit_hours(self):
        """Test with hours unit."""
        assert convert_to_seconds("2h") == 7200  # 2 * 3600
        assert convert_to_seconds("1h") == 3600
        assert convert_to_seconds("0h") == 0

    def test_single_unit_days(self):
        """Test with days unit."""
        assert convert_to_seconds("1d") == 86400  # 1 * 86400
        assert convert_to_seconds("2d") == 172800  # 2 * 86400
        assert convert_to_seconds("0d") == 0

    def test_single_unit_months(self):
        """Test with months unit (30 days * 12 = 1 year)."""
        # Note: The code has M: 2592000 * 12, which is roughly 1 year
        # (2592000 s = 30 days, so * 12 gives 31,104,000 s = 360 days),
        # not 1 month. This seems like a bug in the original code, but
        # these tests assert what it actually does.
        assert convert_to_seconds("1M") == 31104000  # 2592000 * 12
        assert convert_to_seconds("2M") == 62208000  # 2 * 2592000 * 12

    def test_single_unit_years(self):
        """Test with years unit."""
        assert convert_to_seconds("1Y") == 31536000  # 365 * 86400
        assert convert_to_seconds("2Y") == 63072000  # 2 * 365 * 86400

    def test_long_unit_names(self):
        """Test with long unit names."""
        assert convert_to_seconds("1year") == 31536000
        assert convert_to_seconds("2years") == 63072000
        assert convert_to_seconds("1month") == 31104000
        assert convert_to_seconds("2months") == 62208000
        assert convert_to_seconds("1day") == 86400
        assert convert_to_seconds("2days") == 172800
        assert convert_to_seconds("1hour") == 3600
        assert convert_to_seconds("2hours") == 7200
        assert convert_to_seconds("1minute") == 60
        assert convert_to_seconds("2minutes") == 120
        assert convert_to_seconds("30min") == 1800
        assert convert_to_seconds("1second") == 1
        assert convert_to_seconds("2seconds") == 2
        assert convert_to_seconds("30sec") == 30

    def test_multiple_units(self):
        """Test with multiple units combined."""
        assert convert_to_seconds("1h30m") == 5400  # 3600 + 1800
        assert convert_to_seconds("1d2h") == 93600  # 86400 + 7200
        assert convert_to_seconds("1h30m45s") == 5445  # 3600 + 1800 + 45
        assert convert_to_seconds("2d3h4m5s") == 183845  # 172800 + 10800 + 240 + 5

    def test_multiple_units_with_spaces(self):
        """Test with multiple units and spaces."""
        assert convert_to_seconds("1h 30m") == 5400
        assert convert_to_seconds("1d 2h") == 93600
        assert convert_to_seconds("1h 30m 45s") == 5445
        assert convert_to_seconds("2d 3h 4m 5s") == 183845

    def test_mixed_unit_formats(self):
        """Test with mixed short and long unit names."""
        assert convert_to_seconds("1hour 30min") == 5400
        assert convert_to_seconds("1day 2hours") == 93600
        assert convert_to_seconds("1h 30minutes 45sec") == 5445

    def test_negative_values(self):
        """Test with negative time strings."""
        assert convert_to_seconds("-30s") == -30
        assert convert_to_seconds("-1h") == -3600
        assert convert_to_seconds("-1h30m") == -5400
        assert convert_to_seconds("-2d3h4m5s") == -183845

    def test_case_insensitive_long_names(self):
        """Test that long unit names are case insensitive."""
        assert convert_to_seconds("1Hour") == 3600
        assert convert_to_seconds("1MINUTE") == 60
        assert convert_to_seconds("1Day") == 86400
        assert convert_to_seconds("2YEARS") == 63072000

    def test_duplicate_units_error(self):
        """Test that duplicate units raise TimeParseError."""
        with pytest.raises(TimeParseError, match="Unit 'h' appears more than once"):
            convert_to_seconds("1h2h")

        with pytest.raises(TimeParseError, match="Unit 's' appears more than once"):
            convert_to_seconds("30s45s")

        with pytest.raises(TimeParseError, match="Unit 'm' appears more than once"):
            convert_to_seconds("1m30m")

    def test_invalid_units_error(self):
        """Test that invalid units raise TimeUnitError."""
        with pytest.raises(TimeUnitError, match="Unit 'x' is not a valid unit name"):
            convert_to_seconds("30x")

        with pytest.raises(TimeUnitError, match="Unit 'invalid' is not a valid unit name"):
            convert_to_seconds("1invalid")

        with pytest.raises(TimeUnitError, match="Unit 'z' is not a valid unit name"):
            convert_to_seconds("1h30z")

    def test_empty_string(self):
        """Test with empty string."""
        assert convert_to_seconds("") == 0

    def test_no_matches(self):
        """Test with string that has no time units."""
        assert convert_to_seconds("hello") == 0
        assert convert_to_seconds("no time here") == 0

    def test_zero_values(self):
        """Test with zero values for different units."""
        assert convert_to_seconds("0s") == 0
        assert convert_to_seconds("0m") == 0
        assert convert_to_seconds("0h") == 0
        assert convert_to_seconds("0d") == 0
        assert convert_to_seconds("0h0m0s") == 0

    def test_large_values(self):
        """Test with large time values."""
        assert convert_to_seconds("999d") == 86313600  # 999 * 86400
        assert convert_to_seconds("100Y") == 3153600000  # 100 * 31536000

    def test_order_independence(self):
        """Test that order of units doesn't matter."""
        assert convert_to_seconds("30m1h") == 5400  # same as 1h30m
        assert convert_to_seconds("45s30m1h") == 5445  # same as 1h30m45s
        assert convert_to_seconds("5s4m3h2d") == 183845  # same as 2d3h4m5s

    def test_whitespace_handling(self):
        """Test various whitespace scenarios."""
        assert convert_to_seconds("1 h") == 3600
        assert convert_to_seconds("1h 30m") == 5400
        assert convert_to_seconds(" 1h30m ") == 5400
        assert convert_to_seconds("1h\t30m") == 5400

    def test_mixed_case_short_units(self):
        """Test that short unit names are case sensitive."""
        # Note: The regex only matches [a-zA-Z]+ so case matters for the lookup
        with pytest.raises(TimeUnitError, match="Unit 'H' is not a valid unit name"):
            convert_to_seconds("1H")  # 'H' is not in unit_factors, raises error
        assert convert_to_seconds("1h") == 3600  # lowercase works

    def test_boundary_conditions(self):
        """Test boundary conditions and edge cases."""
        # Test with leading zeros
        assert convert_to_seconds("01h") == 3600
        assert convert_to_seconds("001m") == 60

        # Test very small values
        assert convert_to_seconds("1s") == 1

    def test_negative_with_multiple_units(self):
        """Test negative values with multiple units."""
        assert convert_to_seconds("-1h30m45s") == -5445
        assert convert_to_seconds("-2d3h") == -183600

    def test_duplicate_with_long_names(self):
        """Test duplicate detection with long unit names."""
        with pytest.raises(TimeParseError, match="Unit 'h' appears more than once"):
            convert_to_seconds("1hour2h")  # both resolve to 'h'

        with pytest.raises(TimeParseError, match="Unit 's' appears more than once"):
            convert_to_seconds("1second30sec")  # both resolve to 's'

737 tests/unit/datetime_handling/test_datetime_helpers.py Normal file
@@ -0,0 +1,737 @@
"""
PyTest: datetime_handling/datetime_helpers
"""

from datetime import datetime, time
from zoneinfo import ZoneInfo
import pytest

from corelibs.datetime_handling.datetime_helpers import (
    create_time,
    get_system_timezone,
    parse_timezone_data,
    get_datetime_iso8601,
    validate_date,
    parse_flexible_date,
    compare_dates,
    find_newest_datetime_in_list,
    parse_day_of_week_range,
    parse_time_range,
    times_overlap_or_connect,
    is_time_in_range,
    reorder_weekdays_from_today,
    DAYS_OF_WEEK_LONG_TO_SHORT,
    DAYS_OF_WEEK_ISO,
    DAYS_OF_WEEK_ISO_REVERSED,
)


class TestConstants:
    """Test suite for module constants"""

    def test_days_of_week_long_to_short(self):
        """Test DAYS_OF_WEEK_LONG_TO_SHORT dictionary"""
        assert DAYS_OF_WEEK_LONG_TO_SHORT['Monday'] == 'Mon'
        assert DAYS_OF_WEEK_LONG_TO_SHORT['Tuesday'] == 'Tue'
        assert DAYS_OF_WEEK_LONG_TO_SHORT['Friday'] == 'Fri'
        assert DAYS_OF_WEEK_LONG_TO_SHORT['Sunday'] == 'Sun'
        assert len(DAYS_OF_WEEK_LONG_TO_SHORT) == 7

    def test_days_of_week_iso(self):
        """Test DAYS_OF_WEEK_ISO dictionary"""
        assert DAYS_OF_WEEK_ISO[1] == 'Mon'
        assert DAYS_OF_WEEK_ISO[5] == 'Fri'
        assert DAYS_OF_WEEK_ISO[7] == 'Sun'
        assert len(DAYS_OF_WEEK_ISO) == 7

    def test_days_of_week_iso_reversed(self):
        """Test DAYS_OF_WEEK_ISO_REVERSED dictionary"""
        assert DAYS_OF_WEEK_ISO_REVERSED['Mon'] == 1
        assert DAYS_OF_WEEK_ISO_REVERSED['Fri'] == 5
        assert DAYS_OF_WEEK_ISO_REVERSED['Sun'] == 7
        assert len(DAYS_OF_WEEK_ISO_REVERSED) == 7


class TestCreateTime:
    """Test suite for create_time function"""

    def test_create_time_default_format(self):
        """Test create_time with default format"""
        timestamp = 1609459200.0  # 2021-01-01 00:00:00 UTC
        result = create_time(timestamp)
        # Result depends on system timezone, so just check format
        assert len(result) == 19
        assert '-' in result
        assert ':' in result

    def test_create_time_custom_format(self):
        """Test create_time with custom format"""
        timestamp = 1609459200.0
        result = create_time(timestamp, "%Y/%m/%d")
        # Check basic format structure
        assert '/' in result
        assert len(result) == 10

    def test_create_time_with_microseconds(self):
        """Test create_time with microseconds in format"""
        timestamp = 1609459200.123456
        result = create_time(timestamp, "%Y-%m-%d %H:%M:%S")
        assert len(result) == 19


class TestGetSystemTimezone:
    """Test suite for get_system_timezone function"""

    def test_get_system_timezone_returns_tuple(self):
        """Test that get_system_timezone returns a tuple"""
        result = get_system_timezone()
        assert isinstance(result, tuple)
        assert len(result) == 2

    def test_get_system_timezone_returns_valid_data(self):
        """Test that get_system_timezone returns valid timezone info"""
        system_tz, timezone_name = get_system_timezone()
        assert system_tz is not None
        assert isinstance(timezone_name, str)
        assert len(timezone_name) > 0


class TestParseTimezoneData:
    """Test suite for parse_timezone_data function"""

    def test_parse_timezone_data_valid_timezone(self):
        """Test parse_timezone_data with valid timezone string"""
        result = parse_timezone_data('Asia/Tokyo')
        assert isinstance(result, ZoneInfo)
        assert str(result) == 'Asia/Tokyo'

    def test_parse_timezone_data_utc(self):
        """Test parse_timezone_data with UTC"""
        result = parse_timezone_data('UTC')
        assert isinstance(result, ZoneInfo)
        assert str(result) == 'UTC'

    def test_parse_timezone_data_empty_string(self):
        """Test parse_timezone_data with empty string falls back to system timezone"""
        result = parse_timezone_data('')
        assert isinstance(result, ZoneInfo)

    def test_parse_timezone_data_invalid_timezone(self):
        """Test parse_timezone_data with invalid timezone falls back to system timezone"""
        # Invalid timezones fall back to system timezone or UTC
        result = parse_timezone_data('Invalid/Timezone')
        assert isinstance(result, ZoneInfo)
        # Should be either system timezone or UTC

    def test_parse_timezone_data_none(self):
        """Test parse_timezone_data with None falls back to system timezone"""
        result = parse_timezone_data()
        assert isinstance(result, ZoneInfo)

    def test_parse_timezone_data_various_timezones(self):
        """Test parse_timezone_data with various timezone strings"""
        timezones = ['America/New_York', 'Europe/London', 'Asia/Seoul']
        for tz in timezones:
            result = parse_timezone_data(tz)
            assert isinstance(result, ZoneInfo)
            assert str(result) == tz


class TestGetDatetimeIso8601:
    """Test suite for get_datetime_iso8601 function"""

    def test_get_datetime_iso8601_default_params(self):
        """Test get_datetime_iso8601 with default parameters"""
        result = get_datetime_iso8601()
        # Should be in ISO 8601 format with T separator and microseconds
        assert 'T' in result
        assert '.' in result  # microseconds
        # Check basic ISO 8601 format
        datetime.fromisoformat(result)  # Should not raise

    def test_get_datetime_iso8601_custom_timezone_string(self):
        """Test get_datetime_iso8601 with custom timezone string"""
        result = get_datetime_iso8601('UTC')
        assert '+00:00' in result or 'Z' in result

    def test_get_datetime_iso8601_custom_timezone_zoneinfo(self):
        """Test get_datetime_iso8601 with ZoneInfo object"""
        tz = ZoneInfo('Asia/Tokyo')
        result = get_datetime_iso8601(tz)
        assert 'T' in result
        datetime.fromisoformat(result)  # Should not raise

    def test_get_datetime_iso8601_custom_separator(self):
        """Test get_datetime_iso8601 with custom separator"""
        result = get_datetime_iso8601(sep=' ')
        assert ' ' in result
        assert 'T' not in result

    def test_get_datetime_iso8601_different_timespec(self):
        """Test get_datetime_iso8601 with different timespec values"""
        result_seconds = get_datetime_iso8601(timespec='seconds')
        assert '.' not in result_seconds  # No microseconds

        result_milliseconds = get_datetime_iso8601(timespec='milliseconds')
        # Should have milliseconds (3 digits after decimal)
        assert '.' in result_milliseconds


class TestValidateDate:
    """Test suite for validate_date function"""

    def test_validate_date_valid_hyphen_format(self):
        """Test validate_date with valid Y-m-d format"""
        assert validate_date('2023-12-25') is True
        assert validate_date('2024-01-01') is True

    def test_validate_date_valid_slash_format(self):
        """Test validate_date with valid Y/m/d format"""
        assert validate_date('2023/12/25') is True
        assert validate_date('2024/01/01') is True

    def test_validate_date_invalid_format(self):
        """Test validate_date with invalid format"""
        assert validate_date('25-12-2023') is False
        assert validate_date('2023.12.25') is False
        assert validate_date('invalid') is False

    def test_validate_date_invalid_date(self):
        """Test validate_date with invalid date values"""
        assert validate_date('2023-13-01') is False  # Invalid month
        assert validate_date('2023-02-30') is False  # Invalid day

    def test_validate_date_with_not_before(self):
        """Test validate_date with not_before constraint"""
        not_before = datetime(2023, 12, 1)
        assert validate_date('2023-12-25', not_before=not_before) is True
        assert validate_date('2023-11-25', not_before=not_before) is False

    def test_validate_date_with_not_after(self):
        """Test validate_date with not_after constraint"""
        not_after = datetime(2023, 12, 31)
        assert validate_date('2023-12-25', not_after=not_after) is True
        assert validate_date('2024-01-01', not_after=not_after) is False

    def test_validate_date_with_both_constraints(self):
        """Test validate_date with both not_before and not_after constraints"""
        not_before = datetime(2023, 12, 1)
        not_after = datetime(2023, 12, 31)
        assert validate_date('2023-12-15', not_before=not_before, not_after=not_after) is True
        assert validate_date('2023-11-30', not_before=not_before, not_after=not_after) is False
        assert validate_date('2024-01-01', not_before=not_before, not_after=not_after) is False


class TestParseFlexibleDate:
    """Test suite for parse_flexible_date function"""

    def test_parse_flexible_date_iso8601_full(self):
        """Test parse_flexible_date with full ISO 8601 format"""
        result = parse_flexible_date('2023-12-25T15:30:45')
        assert isinstance(result, datetime)
        assert result.year == 2023
        assert result.month == 12
        assert result.day == 25
        assert result.hour == 15
        assert result.minute == 30
        assert result.second == 45

    def test_parse_flexible_date_iso8601_with_microseconds(self):
        """Test parse_flexible_date with microseconds"""
        result = parse_flexible_date('2023-12-25T15:30:45.123456')
        assert isinstance(result, datetime)
        assert result.microsecond == 123456

    def test_parse_flexible_date_simple_date(self):
        """Test parse_flexible_date with simple date format"""
        result = parse_flexible_date('2023-12-25')
        assert isinstance(result, datetime)
        assert result.year == 2023
        assert result.month == 12
        assert result.day == 25

    def test_parse_flexible_date_with_timezone_string(self):
        """Test parse_flexible_date with timezone string"""
        result = parse_flexible_date('2023-12-25T15:30:45', timezone_tz='Asia/Tokyo')
        assert isinstance(result, datetime)
        assert result.tzinfo is not None

    def test_parse_flexible_date_with_timezone_zoneinfo(self):
        """Test parse_flexible_date with ZoneInfo object"""
        tz = ZoneInfo('UTC')
        result = parse_flexible_date('2023-12-25T15:30:45', timezone_tz=tz)
        assert isinstance(result, datetime)
        assert result.tzinfo is not None

    def test_parse_flexible_date_with_timezone_no_shift(self):
        """Test parse_flexible_date with timezone but no shift"""
        result = parse_flexible_date('2023-12-25T15:30:45', timezone_tz='UTC', shift_time_zone=False)
        assert isinstance(result, datetime)
        assert result.hour == 15  # Should not shift

    def test_parse_flexible_date_with_timezone_shift(self):
        """Test parse_flexible_date with timezone shift"""
        result = parse_flexible_date('2023-12-25T15:30:45+00:00', timezone_tz='Asia/Tokyo', shift_time_zone=True)
        assert isinstance(result, datetime)
        assert result.tzinfo is not None

    def test_parse_flexible_date_missing_t_with_timezone_shift(self):
        """Test parse_flexible_date with space-separated input and timezone shift"""
        result = parse_flexible_date('2023-12-25 15:30:45+00:00', timezone_tz='Asia/Tokyo', shift_time_zone=True)
        assert isinstance(result, datetime)
        assert result.tzinfo is not None

    def test_parse_flexible_date_space_separated_datetime(self):
        """Test parse_flexible_date with space-separated datetime format"""
        result = parse_flexible_date('2023-12-25 15:30:45')
        assert isinstance(result, datetime)
        assert result.year == 2023
        assert result.month == 12
        assert result.day == 25
        assert result.hour == 15
        assert result.minute == 30
        assert result.second == 45

    def test_parse_flexible_date_space_separated_with_microseconds(self):
        """Test parse_flexible_date with space-separated datetime and microseconds"""
        result = parse_flexible_date('2023-12-25 15:30:45.123456')
        assert isinstance(result, datetime)
        assert result.year == 2023
        assert result.month == 12
        assert result.day == 25
        assert result.hour == 15
        assert result.minute == 30
        assert result.second == 45
        assert result.microsecond == 123456

    def test_parse_flexible_date_t_separated_datetime(self):
        """Test parse_flexible_date with T-separated datetime (alternative ISO format)"""
        result = parse_flexible_date('2023-12-25T15:30:45')
        assert isinstance(result, datetime)
        assert result.year == 2023
        assert result.month == 12
        assert result.day == 25
        assert result.hour == 15
        assert result.minute == 30
        assert result.second == 45

    def test_parse_flexible_date_t_separated_with_microseconds(self):
        """Test parse_flexible_date with T-separated datetime and microseconds"""
        result = parse_flexible_date('2023-12-25T15:30:45.123456')
        assert isinstance(result, datetime)
        assert result.year == 2023
        assert result.microsecond == 123456

    def test_parse_flexible_date_invalid_format(self):
        """Test parse_flexible_date with invalid format returns None"""
        result = parse_flexible_date('invalid-date')
        assert result is None

    def test_parse_flexible_date_whitespace(self):
        """Test parse_flexible_date with whitespace"""
        result = parse_flexible_date(' 2023-12-25 ')
        assert isinstance(result, datetime)
        assert result.year == 2023


class TestCompareDates:
    """Test suite for compare_dates function"""

    def test_compare_dates_first_newer(self):
        """Test compare_dates when first date is newer"""
        result = compare_dates('2024-01-02', '2024-01-01')
        assert result is True

    def test_compare_dates_first_older(self):
        """Test compare_dates when first date is older"""
        result = compare_dates('2024-01-01', '2024-01-02')
        assert result is False

    def test_compare_dates_equal(self):
        """Test compare_dates when dates are equal"""
        result = compare_dates('2024-01-01', '2024-01-01')
        assert result is False

    def test_compare_dates_with_time(self):
        """Test compare_dates with time components (should only compare dates)"""
        result = compare_dates('2024-01-02T10:00:00', '2024-01-01T23:59:59')
        assert result is True

    def test_compare_dates_invalid_first_date(self):
        """Test compare_dates with invalid first date"""
        result = compare_dates('invalid', '2024-01-01')
        assert result is None

    def test_compare_dates_invalid_second_date(self):
        """Test compare_dates with invalid second date"""
        result = compare_dates('2024-01-01', 'invalid')
        assert result is None

    def test_compare_dates_both_invalid(self):
        """Test compare_dates with both dates invalid"""
        result = compare_dates('invalid1', 'invalid2')
        assert result is None


class TestFindNewestDatetimeInList:
|
||||
"""Test suite for find_newest_datetime_in_list function"""
|
||||
|
||||
def test_find_newest_datetime_in_list_basic(self):
|
||||
"""Test find_newest_datetime_in_list with basic list"""
|
||||
dates = [
|
||||
'2023-12-25T10:00:00',
|
||||
'2024-01-01T12:00:00',
|
||||
'2023-11-15T08:00:00'
|
||||
]
|
||||
result = find_newest_datetime_in_list(dates)
|
||||
assert result == '2024-01-01T12:00:00'
|
||||
|
||||
def test_find_newest_datetime_in_list_with_timezone(self):
|
||||
"""Test find_newest_datetime_in_list with timezone-aware dates"""
|
||||
dates = [
|
||||
'2025-08-06T16:17:39.747+09:00',
|
||||
'2025-08-05T16:17:39.747+09:00',
|
||||
'2025-08-07T16:17:39.747+09:00'
|
||||
]
|
||||
result = find_newest_datetime_in_list(dates)
|
||||
assert result == '2025-08-07T16:17:39.747+09:00'
|
||||
|
||||
def test_find_newest_datetime_in_list_empty_list(self):
|
||||
"""Test find_newest_datetime_in_list with empty list"""
|
||||
result = find_newest_datetime_in_list([])
|
||||
assert result is None
|
||||
|
||||
def test_find_newest_datetime_in_list_single_date(self):
|
||||
"""Test find_newest_datetime_in_list with single date"""
|
||||
dates = ['2024-01-01T12:00:00']
|
||||
result = find_newest_datetime_in_list(dates)
|
||||
assert result == '2024-01-01T12:00:00'
|
||||
|
||||
def test_find_newest_datetime_in_list_with_invalid_dates(self):
|
||||
"""Test find_newest_datetime_in_list with some invalid dates"""
|
||||
dates = [
|
||||
'2023-12-25T10:00:00',
|
||||
'invalid-date',
|
||||
'2024-01-01T12:00:00'
|
||||
]
|
||||
result = find_newest_datetime_in_list(dates)
|
||||
assert result == '2024-01-01T12:00:00'
|
||||
|
||||
def test_find_newest_datetime_in_list_all_invalid(self):
|
||||
"""Test find_newest_datetime_in_list with all invalid dates"""
|
||||
dates = ['invalid1', 'invalid2', 'invalid3']
|
||||
result = find_newest_datetime_in_list(dates)
|
||||
assert result is None
|
||||
|
||||
def test_find_newest_datetime_in_list_mixed_formats(self):
|
||||
"""Test find_newest_datetime_in_list with mixed date formats"""
|
||||
dates = [
|
||||
'2023-12-25',
|
||||
'2024-01-01T12:00:00',
|
||||
'2023-11-15T08:00:00.123456'
|
||||
]
|
||||
result = find_newest_datetime_in_list(dates)
|
||||
assert result == '2024-01-01T12:00:00'
|
||||
|
||||
|
||||
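

# Illustrative sketch only (not the corelibs implementation): behaviour like
# the above, assuming the function reuses parse_flexible_date to skip
# unparseable entries and returns the original string of the newest date.
def _find_newest_sketch(dates):
    newest_str, newest_dt = None, None
    for date_str in dates:
        parsed = parse_flexible_date(date_str)
        if parsed is None:
            continue  # invalid entries are skipped, per the tests above
        if newest_dt is None or parsed > newest_dt:
            newest_str, newest_dt = date_str, parsed
    return newest_str

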
class TestParseDayOfWeekRange:
    """Test suite for parse_day_of_week_range function"""

    def test_parse_day_of_week_range_single_day(self):
        """Test parse_day_of_week_range with single day"""
        result = parse_day_of_week_range('Mon')
        assert result == [(1, 'Mon')]

    def test_parse_day_of_week_range_multiple_days(self):
        """Test parse_day_of_week_range with multiple days"""
        result = parse_day_of_week_range('Mon,Wed,Fri')
        assert len(result) == 3
        assert (1, 'Mon') in result
        assert (3, 'Wed') in result
        assert (5, 'Fri') in result

    def test_parse_day_of_week_range_simple_range(self):
        """Test parse_day_of_week_range with simple range"""
        result = parse_day_of_week_range('Mon-Fri')
        assert len(result) == 5
        assert result[0] == (1, 'Mon')
        assert result[-1] == (5, 'Fri')

    def test_parse_day_of_week_range_weekend_spanning(self):
        """Test parse_day_of_week_range with weekend-spanning range"""
        result = parse_day_of_week_range('Fri-Mon')
        assert len(result) == 4
        assert (5, 'Fri') in result
        assert (6, 'Sat') in result
        assert (7, 'Sun') in result
        assert (1, 'Mon') in result

    def test_parse_day_of_week_range_long_names(self):
        """Test parse_day_of_week_range with long day names - only works in ranges"""
        # Long names only work in ranges, not as standalone days;
        # this is a limitation of the current implementation
        with pytest.raises(ValueError) as exc_info:
            parse_day_of_week_range('Monday,Wednesday')
        assert 'Invalid day of week entry found' in str(exc_info.value)

    def test_parse_day_of_week_range_mixed_format(self):
        """Test parse_day_of_week_range with short names and ranges"""
        result = parse_day_of_week_range('Mon,Wed-Fri')
        assert len(result) == 4
        assert (1, 'Mon') in result
        assert (3, 'Wed') in result
        assert (4, 'Thu') in result
        assert (5, 'Fri') in result

    def test_parse_day_of_week_range_invalid_day(self):
        """Test parse_day_of_week_range with invalid day"""
        with pytest.raises(ValueError) as exc_info:
            parse_day_of_week_range('InvalidDay')
        assert 'Invalid day of week entry found' in str(exc_info.value)

    def test_parse_day_of_week_range_duplicate_days(self):
        """Test parse_day_of_week_range with duplicate days"""
        with pytest.raises(ValueError) as exc_info:
            parse_day_of_week_range('Mon,Mon')
        assert 'Duplicate day of week entries found' in str(exc_info.value)

    def test_parse_day_of_week_range_whitespace_handling(self):
        """Test parse_day_of_week_range with extra whitespace"""
        result = parse_day_of_week_range(' Mon , Wed , Fri ')
        assert len(result) == 3
        assert (1, 'Mon') in result
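

# Illustrative sketch only (not the corelibs implementation): one way to expand
# a possibly weekend-spanning range such as 'Fri-Mon' into (iso_number, name)
# tuples, consistent with the expectations above.
_DAYS_SKETCH = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']


def _expand_day_range_sketch(start, end):
    start_idx = _DAYS_SKETCH.index(start)
    end_idx = _DAYS_SKETCH.index(end)
    if start_idx <= end_idx:
        indices = list(range(start_idx, end_idx + 1))
    else:
        # Wrap past Sunday: 'Fri-Mon' -> Fri, Sat, Sun, Mon
        indices = list(range(start_idx, 7)) + list(range(0, end_idx + 1))
    return [(i + 1, _DAYS_SKETCH[i]) for i in indices]

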
class TestParseTimeRange:
    """Test suite for parse_time_range function"""

    def test_parse_time_range_valid(self):
        """Test parse_time_range with valid time range"""
        start, end = parse_time_range('09:00-17:00')
        assert start == time(9, 0)
        assert end == time(17, 0)

    def test_parse_time_range_different_times(self):
        """Test parse_time_range with different time values"""
        start, end = parse_time_range('08:30-12:45')
        assert start == time(8, 30)
        assert end == time(12, 45)

    def test_parse_time_range_invalid_block(self):
        """Test parse_time_range with invalid block format"""
        with pytest.raises(ValueError) as exc_info:
            parse_time_range('09:00')
        assert 'Invalid time block' in str(exc_info.value)

    def test_parse_time_range_invalid_format(self):
        """Test parse_time_range with invalid time format"""
        with pytest.raises(ValueError) as exc_info:
            parse_time_range('25:00-26:00')
        assert 'Invalid time block format' in str(exc_info.value)

    def test_parse_time_range_start_after_end(self):
        """Test parse_time_range with start time after end time"""
        with pytest.raises(ValueError) as exc_info:
            parse_time_range('17:00-09:00')
        assert 'start time after end time' in str(exc_info.value)

    def test_parse_time_range_equal_times(self):
        """Test parse_time_range with equal start and end times"""
        with pytest.raises(ValueError) as exc_info:
            parse_time_range('09:00-09:00')
        assert 'start time after end time or equal' in str(exc_info.value)

    def test_parse_time_range_custom_format(self):
        """Test parse_time_range with custom time format"""
        start, end = parse_time_range('09:00:00-17:00:00', time_format='%H:%M:%S')
        assert start == time(9, 0, 0)
        assert end == time(17, 0, 0)

    def test_parse_time_range_whitespace(self):
        """Test parse_time_range with whitespace"""
        start, end = parse_time_range(' 09:00-17:00 ')
        assert start == time(9, 0)
        assert end == time(17, 0)


class TestTimesOverlapOrConnect:
    """Test suite for times_overlap_or_connect function"""

    def test_times_overlap_or_connect_clear_overlap(self):
        """Test times_overlap_or_connect with clear overlap"""
        time1 = (time(9, 0), time(12, 0))
        time2 = (time(10, 0), time(14, 0))
        assert times_overlap_or_connect(time1, time2) is True

    def test_times_overlap_or_connect_no_overlap(self):
        """Test times_overlap_or_connect with no overlap"""
        time1 = (time(9, 0), time(12, 0))
        time2 = (time(13, 0), time(17, 0))
        assert times_overlap_or_connect(time1, time2) is False

    def test_times_overlap_or_connect_touching_not_allowed(self):
        """Test times_overlap_or_connect with touching ranges (not allowed)"""
        time1 = (time(8, 0), time(10, 0))
        time2 = (time(10, 0), time(12, 0))
        assert times_overlap_or_connect(time1, time2, allow_touching=False) is True

    def test_times_overlap_or_connect_touching_allowed(self):
        """Test times_overlap_or_connect with touching ranges (allowed)"""
        time1 = (time(8, 0), time(10, 0))
        time2 = (time(10, 0), time(12, 0))
        assert times_overlap_or_connect(time1, time2, allow_touching=True) is False

    def test_times_overlap_or_connect_one_contains_other(self):
        """Test times_overlap_or_connect when one range contains the other"""
        time1 = (time(9, 0), time(17, 0))
        time2 = (time(10, 0), time(12, 0))
        assert times_overlap_or_connect(time1, time2) is True

    def test_times_overlap_or_connect_same_start(self):
        """Test times_overlap_or_connect with same start time"""
        time1 = (time(9, 0), time(12, 0))
        time2 = (time(9, 0), time(14, 0))
        assert times_overlap_or_connect(time1, time2) is True

    def test_times_overlap_or_connect_same_end(self):
        """Test times_overlap_or_connect with same end time"""
        time1 = (time(9, 0), time(12, 0))
        time2 = (time(10, 0), time(12, 0))
        assert times_overlap_or_connect(time1, time2) is True
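

# Illustrative sketch only (not the corelibs implementation): a predicate
# consistent with the tests above. Note the allow_touching semantics: when
# touching is allowed, a shared boundary is NOT reported (False); with
# allow_touching=False a shared boundary counts as connecting (True).
def _overlap_sketch(range1, range2, allow_touching=False):
    start1, end1 = range1
    start2, end2 = range2
    if allow_touching:
        # Strict overlap only: a shared endpoint is acceptable
        return start1 < end2 and start2 < end1
    # Touching endpoints also count as connecting
    return start1 <= end2 and start2 <= end1

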
class TestIsTimeInRange:
    """Test suite for is_time_in_range function"""

    def test_is_time_in_range_within_range(self):
        """Test is_time_in_range with time within range"""
        assert is_time_in_range('10:00:00', '09:00:00', '17:00:00') is True

    def test_is_time_in_range_at_start(self):
        """Test is_time_in_range with time at start of range"""
        assert is_time_in_range('09:00:00', '09:00:00', '17:00:00') is True

    def test_is_time_in_range_at_end(self):
        """Test is_time_in_range with time at end of range"""
        assert is_time_in_range('17:00:00', '09:00:00', '17:00:00') is True

    def test_is_time_in_range_before_range(self):
        """Test is_time_in_range with time before range"""
        assert is_time_in_range('08:00:00', '09:00:00', '17:00:00') is False

    def test_is_time_in_range_after_range(self):
        """Test is_time_in_range with time after range"""
        assert is_time_in_range('18:00:00', '09:00:00', '17:00:00') is False

    def test_is_time_in_range_crosses_midnight(self):
        """Test is_time_in_range with range crossing midnight"""
        # Range from 22:00 to 06:00
        assert is_time_in_range('23:00:00', '22:00:00', '06:00:00') is True
        assert is_time_in_range('03:00:00', '22:00:00', '06:00:00') is True
        assert is_time_in_range('12:00:00', '22:00:00', '06:00:00') is False

    def test_is_time_in_range_midnight_boundary(self):
        """Test is_time_in_range at midnight"""
        assert is_time_in_range('00:00:00', '22:00:00', '06:00:00') is True
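

# Illustrative sketch only (not the corelibs implementation): the usual trick
# for a range that crosses midnight is to flip the comparison, which matches
# the expectations above (inclusive at both ends).
def _in_range_sketch(check, start, end):
    if start <= end:
        return start <= check <= end
    # Crosses midnight (e.g. 22:00-06:00): match either side of midnight
    return check >= start or check <= end

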
class TestReorderWeekdaysFromToday:
    """Test suite for reorder_weekdays_from_today function"""

    def test_reorder_weekdays_from_monday(self):
        """Test reorder_weekdays_from_today starting from Monday"""
        result = reorder_weekdays_from_today('Mon')
        values = list(result.values())
        assert values[0] == 'Mon'
        assert values[-1] == 'Sun'
        assert len(result) == 7

    def test_reorder_weekdays_from_wednesday(self):
        """Test reorder_weekdays_from_today starting from Wednesday"""
        result = reorder_weekdays_from_today('Wed')
        values = list(result.values())
        assert values[0] == 'Wed'
        assert values[1] == 'Thu'
        assert values[-1] == 'Tue'

    def test_reorder_weekdays_from_sunday(self):
        """Test reorder_weekdays_from_today starting from Sunday"""
        result = reorder_weekdays_from_today('Sun')
        values = list(result.values())
        assert values[0] == 'Sun'
        assert values[-1] == 'Sat'

    def test_reorder_weekdays_from_long_name(self):
        """Test reorder_weekdays_from_today with long day name"""
        result = reorder_weekdays_from_today('Friday')
        values = list(result.values())
        assert values[0] == 'Fri'
        assert values[-1] == 'Thu'

    def test_reorder_weekdays_invalid_day(self):
        """Test reorder_weekdays_from_today with invalid day name"""
        with pytest.raises(ValueError) as exc_info:
            reorder_weekdays_from_today('InvalidDay')
        assert 'Invalid day name provided' in str(exc_info.value)

    def test_reorder_weekdays_preserves_all_days(self):
        """Test that reorder_weekdays_from_today preserves all 7 days"""
        for day in ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']:
            result = reorder_weekdays_from_today(day)
            assert len(result) == 7
            assert set(result.values()) == {'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'}
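

# Illustrative sketch only (not the corelibs implementation): the reordering is
# a simple rotation of the weekday list; the dict shape keyed by ISO day number
# is an assumption for illustration.
def _reorder_sketch(today):
    days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
    start = days.index(today[:3].title())  # accepts 'Fri' as well as 'Friday'
    rotated = days[start:] + days[:start]
    return {days.index(name) + 1: name for name in rotated}

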
class TestEdgeCases:
    """Test suite for edge cases and integration scenarios"""

    def test_parse_flexible_date_with_various_iso_formats(self):
        """Test parse_flexible_date handles various ISO format variations"""
        formats = [
            '2023-12-25',
            '2023-12-25T15:30:45',
            '2023-12-25T15:30:45.123456',
        ]
        for date_str in formats:
            result = parse_flexible_date(date_str)
            assert result is not None
            assert isinstance(result, datetime)

    def test_timezone_consistency_across_functions(self):
        """Test timezone handling consistency across functions"""
        tz_str = 'Asia/Tokyo'
        tz_obj = parse_timezone_data(tz_str)

        # Both should work with get_datetime_iso8601
        result1 = get_datetime_iso8601(tz_str)
        result2 = get_datetime_iso8601(tz_obj)

        assert result1 is not None
        assert result2 is not None

    def test_date_validation_and_parsing_consistency(self):
        """Test that validate_date and parse_flexible_date agree"""
        valid_dates = ['2023-12-25', '2024/01/01']
        for date_str in valid_dates:
            # normalize format for parse_flexible_date
            normalized = date_str.replace('/', '-')
            assert validate_date(date_str) is True
            assert parse_flexible_date(normalized) is not None

    def test_day_of_week_range_complex_scenario(self):
        """Test parse_day_of_week_range with complex mixed input"""
        result = parse_day_of_week_range('Mon,Wed-Fri,Sun')
        assert len(result) == 5
        assert (1, 'Mon') in result
        assert (3, 'Wed') in result
        assert (4, 'Thu') in result
        assert (5, 'Fri') in result
        assert (7, 'Sun') in result

    def test_time_range_boundary_conditions(self):
        """Test parse_time_range with boundary times"""
        start, end = parse_time_range('00:00-23:59')
        assert start == time(0, 0)
        assert end == time(23, 59)

# __END__

462 tests/unit/datetime_handling/test_seconds_to_string.py Normal file
@@ -0,0 +1,462 @@
"""
|
||||
PyTest: datetime_handling/timestamp_convert - seconds_to_string and convert_timestamp functions
|
||||
"""
|
||||
|
||||
from corelibs.datetime_handling.timestamp_convert import seconds_to_string, convert_timestamp
|
||||
|
||||
|
||||
class TestSecondsToString:
|
||||
"""Test suite for seconds_to_string function"""
|
||||
|
||||
def test_basic_integer_seconds(self):
|
||||
"""Test conversion of basic integer seconds"""
|
||||
assert seconds_to_string(0) == "0s"
|
||||
assert seconds_to_string(1) == "1s"
|
||||
assert seconds_to_string(30) == "30s"
|
||||
assert seconds_to_string(59) == "59s"
|
||||
|
||||
def test_minutes_conversion(self):
|
||||
"""Test conversion involving minutes"""
|
||||
assert seconds_to_string(60) == "1m"
|
||||
assert seconds_to_string(90) == "1m 30s"
|
||||
assert seconds_to_string(120) == "2m"
|
||||
assert seconds_to_string(3599) == "59m 59s"
|
||||
|
||||
def test_hours_conversion(self):
|
||||
"""Test conversion involving hours"""
|
||||
assert seconds_to_string(3600) == "1h"
|
||||
assert seconds_to_string(3660) == "1h 1m"
|
||||
assert seconds_to_string(3661) == "1h 1m 1s"
|
||||
assert seconds_to_string(7200) == "2h"
|
||||
assert seconds_to_string(7260) == "2h 1m"
|
||||
|
||||
def test_days_conversion(self):
|
||||
"""Test conversion involving days"""
|
||||
assert seconds_to_string(86400) == "1d"
|
||||
assert seconds_to_string(86401) == "1d 1s"
|
||||
assert seconds_to_string(90000) == "1d 1h"
|
||||
assert seconds_to_string(90061) == "1d 1h 1m 1s"
|
||||
assert seconds_to_string(172800) == "2d"
|
||||
|
||||
def test_complex_combinations(self):
|
||||
"""Test complex time combinations"""
|
||||
# 1 day, 2 hours, 3 minutes, 4 seconds
|
||||
total = 86400 + 7200 + 180 + 4
|
||||
assert seconds_to_string(total) == "1d 2h 3m 4s"
|
||||
|
||||
# 5 days, 23 hours, 59 minutes, 59 seconds
|
||||
total = 5 * 86400 + 23 * 3600 + 59 * 60 + 59
|
||||
assert seconds_to_string(total) == "5d 23h 59m 59s"
|
||||
|
||||
def test_fractional_seconds_default_precision(self):
|
||||
"""Test fractional seconds with default precision (3 decimal places)"""
|
||||
assert seconds_to_string(0.1) == "0.1s"
|
||||
assert seconds_to_string(0.123) == "0.123s"
|
||||
assert seconds_to_string(0.1234) == "0.123s"
|
||||
assert seconds_to_string(1.5) == "1.5s"
|
||||
assert seconds_to_string(1.567) == "1.567s"
|
||||
assert seconds_to_string(1.5678) == "1.568s"
|
||||
|
||||
def test_fractional_seconds_microsecond_precision(self):
|
||||
"""Test fractional seconds with microsecond precision"""
|
||||
assert seconds_to_string(0.1, show_microseconds=True) == "0.1s"
|
||||
assert seconds_to_string(0.123456, show_microseconds=True) == "0.123456s"
|
||||
assert seconds_to_string(0.1234567, show_microseconds=True) == "0.123457s"
|
||||
assert seconds_to_string(1.5, show_microseconds=True) == "1.5s"
|
||||
assert seconds_to_string(1.567890, show_microseconds=True) == "1.56789s"
|
||||
|
||||
def test_fractional_seconds_with_larger_units(self):
|
||||
"""Test fractional seconds combined with larger time units"""
|
||||
# 1 minute and 30.5 seconds
|
||||
assert seconds_to_string(90.5) == "1m 30.5s"
|
||||
assert seconds_to_string(90.5, show_microseconds=True) == "1m 30.5s"
|
||||
|
||||
# 1 hour, 1 minute, and 1.123 seconds
|
||||
total = 3600 + 60 + 1.123
|
||||
assert seconds_to_string(total) == "1h 1m 1.123s"
|
||||
assert seconds_to_string(total, show_microseconds=True) == "1h 1m 1.123s"
|
||||
|
||||
def test_negative_values(self):
|
||||
"""Test negative time values"""
|
||||
assert seconds_to_string(-1) == "-1s"
|
||||
assert seconds_to_string(-60) == "-1m"
|
||||
assert seconds_to_string(-90) == "-1m 30s"
|
||||
assert seconds_to_string(-3661) == "-1h 1m 1s"
|
||||
assert seconds_to_string(-86401) == "-1d 1s"
|
||||
assert seconds_to_string(-1.5) == "-1.5s"
|
||||
assert seconds_to_string(-90.123) == "-1m 30.123s"
|
||||
|
||||
def test_zero_handling(self):
|
||||
"""Test various zero values"""
|
||||
assert seconds_to_string(0) == "0s"
|
||||
assert seconds_to_string(0.0) == "0s"
|
||||
assert seconds_to_string(-0) == "0s"
|
||||
assert seconds_to_string(-0.0) == "0s"
|
||||
|
||||
def test_float_input_types(self):
|
||||
"""Test various float input types"""
|
||||
assert seconds_to_string(1.0) == "1s"
|
||||
assert seconds_to_string(60.0) == "1m"
|
||||
assert seconds_to_string(3600.0) == "1h"
|
||||
assert seconds_to_string(86400.0) == "1d"
|
||||
|
||||
def test_large_values(self):
|
||||
"""Test handling of large time values"""
|
||||
# 365 days (1 year)
|
||||
year_seconds = 365 * 86400
|
||||
assert seconds_to_string(year_seconds) == "365d"
|
||||
|
||||
# 1000 days
|
||||
assert seconds_to_string(1000 * 86400) == "1000d"
|
||||
|
||||
# Large number with all units
|
||||
large_time = 999 * 86400 + 23 * 3600 + 59 * 60 + 59.999
|
||||
result = seconds_to_string(large_time)
|
||||
assert result.startswith("999d")
|
||||
assert "23h" in result
|
||||
assert "59m" in result
|
||||
assert "59.999s" in result
|
||||
|
||||
def test_rounding_behavior(self):
|
||||
"""Test rounding behavior for fractional seconds"""
|
||||
# Default precision (3 decimal places) - values are truncated via rstrip
|
||||
assert seconds_to_string(1.0004) == "1s" # Truncates trailing zeros after rstrip
|
||||
assert seconds_to_string(1.0005) == "1s" # Truncates trailing zeros after rstrip
|
||||
assert seconds_to_string(1.9999) == "2s" # Rounds up and strips .000
|
||||
|
||||
# Microsecond precision (6 decimal places)
|
||||
assert seconds_to_string(1.0000004, show_microseconds=True) == "1s"
|
||||
assert seconds_to_string(1.0000005, show_microseconds=True) == "1.000001s"
|
||||
|
||||
def test_trailing_zero_removal(self):
|
||||
"""Test that trailing zeros are properly removed"""
|
||||
assert seconds_to_string(1.100) == "1.1s"
|
||||
assert seconds_to_string(1.120) == "1.12s"
|
||||
assert seconds_to_string(1.123) == "1.123s"
|
||||
assert seconds_to_string(1.100000, show_microseconds=True) == "1.1s"
|
||||
assert seconds_to_string(1.123000, show_microseconds=True) == "1.123s"
|
||||
|
||||
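
    # Illustrative sketch only (not the corelibs implementation): the rounding
    # and trailing-zero behaviour above is what fixed-precision formatting plus
    # rstrip produces for the seconds component.
    @staticmethod
    def _format_seconds_sketch(secs, show_microseconds=False):
        precision = 6 if show_microseconds else 3
        text = f"{secs:.{precision}f}".rstrip('0').rstrip('.')
        return f"{text}s"  # e.g. 1.100 -> '1.1s', 1.9999 -> '2s'
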
    def test_invalid_input_types(self):
        """Test handling of invalid input types"""
        # String inputs should be returned as-is
        assert seconds_to_string("invalid") == "invalid"
        assert seconds_to_string("not a number") == "not a number"
        assert seconds_to_string("") == ""

    def test_edge_cases_boundary_values(self):
        """Test edge cases at unit boundaries"""
        # One second short of a minute
        assert seconds_to_string(59) == "59s"
        assert seconds_to_string(59.999) == "59.999s"

        # One second short of an hour
        assert seconds_to_string(3599) == "59m 59s"
        assert seconds_to_string(3599.999) == "59m 59.999s"

        # One second short of a day
        assert seconds_to_string(86399) == "23h 59m 59s"
        assert seconds_to_string(86399.999) == "23h 59m 59.999s"

    def test_very_small_fractional_seconds(self):
        """Test very small fractional values"""
        assert seconds_to_string(0.001) == "0.001s"
        assert seconds_to_string(0.0001) == "0s"  # Below default precision
        assert seconds_to_string(0.000001, show_microseconds=True) == "0.000001s"
        assert seconds_to_string(0.0000001, show_microseconds=True) == "0s"  # Below microsecond precision

    def test_precision_consistency(self):
        """Test that precision is consistent across different scenarios"""
        # With other units present
        assert seconds_to_string(61.123456) == "1m 1.123s"
        assert seconds_to_string(61.123456, show_microseconds=True) == "1m 1.123456s"

        # Large values with fractional seconds
        large_val = 90061.123456  # 1d 1h 1m 1.123456s
        assert seconds_to_string(large_val) == "1d 1h 1m 1.123s"
        assert seconds_to_string(large_val, show_microseconds=True) == "1d 1h 1m 1.123456s"

    def test_string_numeric_inputs(self):
        """Test string inputs that represent numbers"""
        # String inputs should be returned as-is, even if they look like numbers
        assert seconds_to_string("60") == "60"
        assert seconds_to_string("1.5") == "1.5"
        assert seconds_to_string("0") == "0"
        assert seconds_to_string("-60") == "-60"


class TestConvertTimestamp:
    """Test suite for convert_timestamp function"""

    def test_basic_integer_seconds(self):
        """Test conversion of basic integer seconds"""
        assert convert_timestamp(0) == "0s 0ms"
        assert convert_timestamp(1) == "1s 0ms"
        assert convert_timestamp(30) == "30s 0ms"
        assert convert_timestamp(59) == "59s 0ms"

    def test_basic_without_microseconds(self):
        """Test conversion without showing microseconds"""
        assert convert_timestamp(0, show_microseconds=False) == "0s"
        assert convert_timestamp(1, show_microseconds=False) == "1s"
        assert convert_timestamp(30, show_microseconds=False) == "30s"
        assert convert_timestamp(59, show_microseconds=False) == "59s"

    def test_minutes_conversion(self):
        """Test conversion involving minutes"""
        assert convert_timestamp(60) == "1m 0s 0ms"
        assert convert_timestamp(90) == "1m 30s 0ms"
        assert convert_timestamp(120) == "2m 0s 0ms"
        assert convert_timestamp(3599) == "59m 59s 0ms"

    def test_minutes_conversion_without_microseconds(self):
        """Test conversion involving minutes without microseconds"""
        assert convert_timestamp(60, show_microseconds=False) == "1m 0s"
        assert convert_timestamp(90, show_microseconds=False) == "1m 30s"
        assert convert_timestamp(120, show_microseconds=False) == "2m 0s"

    def test_hours_conversion(self):
        """Test conversion involving hours"""
        assert convert_timestamp(3600) == "1h 0m 0s 0ms"
        assert convert_timestamp(3660) == "1h 1m 0s 0ms"
        assert convert_timestamp(3661) == "1h 1m 1s 0ms"
        assert convert_timestamp(7200) == "2h 0m 0s 0ms"
        assert convert_timestamp(7260) == "2h 1m 0s 0ms"

    def test_hours_conversion_without_microseconds(self):
        """Test conversion involving hours without microseconds"""
        assert convert_timestamp(3600, show_microseconds=False) == "1h 0m 0s"
        assert convert_timestamp(3660, show_microseconds=False) == "1h 1m 0s"
        assert convert_timestamp(3661, show_microseconds=False) == "1h 1m 1s"

    def test_days_conversion(self):
        """Test conversion involving days"""
        assert convert_timestamp(86400) == "1d 0h 0m 0s 0ms"
        assert convert_timestamp(86401) == "1d 0h 0m 1s 0ms"
        assert convert_timestamp(90000) == "1d 1h 0m 0s 0ms"
        assert convert_timestamp(90061) == "1d 1h 1m 1s 0ms"
        assert convert_timestamp(172800) == "2d 0h 0m 0s 0ms"

    def test_days_conversion_without_microseconds(self):
        """Test conversion involving days without microseconds"""
        assert convert_timestamp(86400, show_microseconds=False) == "1d 0h 0m 0s"
        assert convert_timestamp(86401, show_microseconds=False) == "1d 0h 0m 1s"
        assert convert_timestamp(90000, show_microseconds=False) == "1d 1h 0m 0s"

    def test_complex_combinations(self):
        """Test complex time combinations"""
        # 1 day, 2 hours, 3 minutes, 4 seconds
        total = 86400 + 7200 + 180 + 4
        assert convert_timestamp(total) == "1d 2h 3m 4s 0ms"

        # 5 days, 23 hours, 59 minutes, 59 seconds
        total = 5 * 86400 + 23 * 3600 + 59 * 60 + 59
        assert convert_timestamp(total) == "5d 23h 59m 59s 0ms"

    def test_fractional_seconds_with_microseconds(self):
        """Test fractional seconds showing microseconds"""
        # Note: the ms value is the integer value of the decimal-part string
        # after rounding to 4 places
        assert convert_timestamp(0.1) == "0s 1ms"        # 0.1 -> "0.1" -> ms=1
        assert convert_timestamp(0.123) == "0s 123ms"    # 0.123 -> "0.123" -> ms=123
        assert convert_timestamp(0.1234) == "0s 1234ms"  # 0.1234 -> "0.1234" -> ms=1234
        assert convert_timestamp(1.5) == "1s 5ms"        # 1.5 -> "1.5" -> ms=5
        assert convert_timestamp(1.567) == "1s 567ms"    # 1.567 -> "1.567" -> ms=567
        assert convert_timestamp(1.5678) == "1s 5678ms"  # 1.5678 stays 1.5678 -> ms=5678

    def test_fractional_seconds_rounding(self):
        """Test rounding of fractional seconds to 4 decimal places"""
        # The function rounds to 4 decimal places before splitting
        assert convert_timestamp(0.12345) == "0s 1235ms"   # Rounds to 0.1235
        assert convert_timestamp(0.123456) == "0s 1235ms"  # Rounds to 0.1235
        assert convert_timestamp(1.99999) == "2s 0ms"      # Rounds to 2.0

    def test_fractional_seconds_with_larger_units(self):
        """Test fractional seconds combined with larger time units"""
        # 1 minute and 30.5 seconds
        assert convert_timestamp(90.5) == "1m 30s 5ms"

        # 1 hour, 1 minute, and 1.123 seconds
        total = 3600 + 60 + 1.123
        assert convert_timestamp(total) == "1h 1m 1s 123ms"

    def test_negative_values(self):
        """Test negative time values"""
        assert convert_timestamp(-1) == "-1s 0ms"
        assert convert_timestamp(-60) == "-1m 0s 0ms"
        assert convert_timestamp(-90) == "-1m 30s 0ms"
        assert convert_timestamp(-3661) == "-1h 1m 1s 0ms"
        assert convert_timestamp(-86401) == "-1d 0h 0m 1s 0ms"
        assert convert_timestamp(-1.5) == "-1s 5ms"
        assert convert_timestamp(-90.123) == "-1m 30s 123ms"

    def test_negative_without_microseconds(self):
        """Test negative values without microseconds"""
        assert convert_timestamp(-1, show_microseconds=False) == "-1s"
        assert convert_timestamp(-60, show_microseconds=False) == "-1m 0s"
        assert convert_timestamp(-90.123, show_microseconds=False) == "-1m 30s"

    def test_zero_handling(self):
        """Test various zero values"""
        assert convert_timestamp(0) == "0s 0ms"
        assert convert_timestamp(0.0) == "0s 0ms"
        assert convert_timestamp(-0) == "0s 0ms"
        assert convert_timestamp(-0.0) == "0s 0ms"

    def test_zero_filling_behavior(self):
        """Test that zeros are filled between set values"""
        # If we have days and seconds, hours and minutes should be 0
        assert convert_timestamp(86401) == "1d 0h 0m 1s 0ms"

        # If we have hours and seconds, minutes should be 0
        assert convert_timestamp(3601) == "1h 0m 1s 0ms"

        # If we have days and hours, minutes and seconds should be 0
        assert convert_timestamp(90000) == "1d 1h 0m 0s 0ms"

    def test_milliseconds_display(self):
        """Test milliseconds are always shown when show_microseconds=True"""
        # Even with no fractional part, 0ms should be shown
        assert convert_timestamp(1) == "1s 0ms"
        assert convert_timestamp(60) == "1m 0s 0ms"
        assert convert_timestamp(3600) == "1h 0m 0s 0ms"

        # With a fractional part, ms should be shown
        assert convert_timestamp(1.001) == "1s 1ms"   # "1.001" -> ms=1
        assert convert_timestamp(1.0001) == "1s 1ms"  # "1.0001" -> ms=1

    def test_float_input_types(self):
        """Test various float input types"""
        assert convert_timestamp(1.0) == "1s 0ms"
        assert convert_timestamp(60.0) == "1m 0s 0ms"
        assert convert_timestamp(3600.0) == "1h 0m 0s 0ms"
        assert convert_timestamp(86400.0) == "1d 0h 0m 0s 0ms"

    def test_large_values(self):
        """Test handling of large time values"""
        # 365 days (1 year)
        year_seconds = 365 * 86400
        assert convert_timestamp(year_seconds) == "365d 0h 0m 0s 0ms"

        # 1000 days
        assert convert_timestamp(1000 * 86400) == "1000d 0h 0m 0s 0ms"

        # Large number with all units
        large_time = 999 * 86400 + 23 * 3600 + 59 * 60 + 59.999
        result = convert_timestamp(large_time)
        assert result.startswith("999d")
        assert "23h" in result
        assert "59m" in result
        assert "59s" in result
        assert "999ms" in result  # 59.999 stays 59.999 -> ms=999

    def test_invalid_input_types(self):
        """Test handling of invalid input types"""
        # String inputs should be returned as-is
        assert convert_timestamp("invalid") == "invalid"
        assert convert_timestamp("not a number") == "not a number"
        assert convert_timestamp("") == ""

    def test_string_numeric_inputs(self):
        """Test string inputs that represent numbers"""
        # String inputs should be returned as-is, even if they look like numbers
        assert convert_timestamp("60") == "60"
        assert convert_timestamp("1.5") == "1.5"
        assert convert_timestamp("0") == "0"
        assert convert_timestamp("-60") == "-60"

    def test_edge_cases_boundary_values(self):
        """Test edge cases at unit boundaries"""
        # One second short of a minute
        assert convert_timestamp(59) == "59s 0ms"
        assert convert_timestamp(59.999) == "59s 999ms"

        # One second short of an hour
        assert convert_timestamp(3599) == "59m 59s 0ms"
        assert convert_timestamp(3599.999) == "59m 59s 999ms"

        # One second short of a day
        assert convert_timestamp(86399) == "23h 59m 59s 0ms"
        assert convert_timestamp(86399.999) == "23h 59m 59s 999ms"

    def test_very_small_fractional_seconds(self):
        """Test very small fractional values"""
        assert convert_timestamp(0.001) == "0s 1ms"    # 0.001 -> "0.001" -> ms=1
        assert convert_timestamp(0.0001) == "0s 1ms"   # 0.0001 -> "0.0001" -> ms=1
        assert convert_timestamp(0.00005) == "0s 1ms"  # 0.00005 rounds to 0.0001 -> ms=1
        assert convert_timestamp(0.00004) == "0s 0ms"  # 0.00004 rounds to 0.0 -> ms=0

    def test_milliseconds_extraction(self):
        """Test that milliseconds are correctly extracted from the fractional part"""
        # The ms value is the integer of the decimal-part string, not a unit
        # conversion, so 0.1 -> "0.1" -> ms=1, NOT the 100ms you might expect
        assert convert_timestamp(0.1) == "0s 1ms"
        # 0.01 seconds -> "0.01" -> ms=1 (int("01") = 1)
        assert convert_timestamp(0.01) == "0s 1ms"
        # 0.001 seconds -> "0.001" -> ms=1
        assert convert_timestamp(0.001) == "0s 1ms"
        # 0.0001 seconds -> "0.0001" -> ms=1
        assert convert_timestamp(0.0001) == "0s 1ms"
        # 0.00004 seconds rounds to "0.0" -> ms=0
        assert convert_timestamp(0.00004) == "0s 0ms"
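
    # Illustrative sketch only (not the corelibs implementation): the quirky ms
    # values above fall out of rounding to 4 places, splitting the string on
    # the decimal point, and int()-ing the fractional digits directly.
    @staticmethod
    def _extract_ms_sketch(secs):
        text = str(round(secs, 4))       # e.g. 0.1 -> '0.1', 0.00005 -> '0.0001'
        if '.' not in text:
            return 0
        return int(text.split('.')[1])   # '0.1' -> int('1') == 1, not 100
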
    def test_comparison_with_seconds_to_string(self):
        """Test differences between convert_timestamp and seconds_to_string"""
        # convert_timestamp fills zeros and adds ms;
        # seconds_to_string omits zero units and has no ms
        assert convert_timestamp(86401) == "1d 0h 0m 1s 0ms"
        assert seconds_to_string(86401) == "1d 1s"

        assert convert_timestamp(3661) == "1h 1m 1s 0ms"
        assert seconds_to_string(3661) == "1h 1m 1s"

        # With microseconds disabled, still different due to zero-filling
        assert convert_timestamp(86401, show_microseconds=False) == "1d 0h 0m 1s"
        assert seconds_to_string(86401) == "1d 1s"

    def test_precision_consistency(self):
        """Test that precision is consistent across different scenarios"""
        # With other units present
        assert convert_timestamp(61.123456) == "1m 1s 1235ms"  # Rounds to 61.1235

        # Large values with fractional seconds
        large_val = 90061.123456  # 1d 1h 1m 1.123456s
        assert convert_timestamp(large_val) == "1d 1h 1m 1s 1235ms"  # Rounds to .1235

    def test_microseconds_flag_consistency(self):
        """Test that show_microseconds flag works consistently"""
        test_values = [0, 1, 60, 3600, 86400, 1.5, 90.123, -60]

        for val in test_values:
            with_ms = convert_timestamp(val, show_microseconds=True)
            without_ms = convert_timestamp(val, show_microseconds=False)

            # With microseconds should contain 'ms', without should not
            assert "ms" in with_ms
            assert "ms" not in without_ms

            # Both should start with the same sign if negative
            if val < 0:
                assert with_ms.startswith("-")
                assert without_ms.startswith("-")

    def test_format_consistency(self):
        """Test that output format is consistent"""
        # All outputs should have consistent spacing and unit ordering;
        # format should be: [d ]h m s[ ms]
        result = convert_timestamp(93784.5678)  # 1d 2h 3m 4.5678s
        # 93784.5678 rounds to 93784.5678, splits to ["93784", "5678"]
        assert result == "1d 2h 3m 4s 5678ms"

        # Verify parts are in correct order
        parts = result.split()
        # Extract units properly: last 1-2 chars that are letters
        units: list[str] = []
        for p in parts:
            if p.endswith('ms'):
                units.append('ms')
            elif p[-1].isalpha():
                units.append(p[-1])
        # Should be in order: d, h, m, s, ms
        expected_order = ['d', 'h', 'm', 's', 'ms']
        assert units == expected_order

# __END__

194 tests/unit/datetime_handling/test_timestamp_strings.py Normal file
@@ -0,0 +1,194 @@
"""
|
||||
PyTest: datetime_handling/timestamp_strings
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from unittest.mock import patch
|
||||
from zoneinfo import ZoneInfo
|
||||
import pytest
|
||||
|
||||
# Assuming the class is in a file called timestamp_strings.py
|
||||
from corelibs.datetime_handling.timestamp_strings import TimestampStrings
|
||||
|
||||
|
||||
class TestTimestampStrings:
|
||||
"""Test suite for TimestampStrings class"""
|
||||
|
||||
def test_default_initialization(self):
|
||||
"""Test initialization with default timezone"""
|
||||
with patch('corelibs.datetime_handling.timestamp_strings.datetime') as mock_datetime:
|
||||
mock_now = datetime(2023, 12, 25, 15, 30, 45)
|
||||
mock_datetime.now.return_value = mock_now
|
||||
|
||||
ts = TimestampStrings()
|
||||
|
||||
assert ts.time_zone == 'Asia/Tokyo'
|
||||
assert ts.timestamp_now == mock_now
|
||||
assert ts.today == '2023-12-25'
|
||||
assert ts.timestamp == '2023-12-25 15:30:45'
|
||||
assert ts.timestamp_file == '2023-12-25_153045'
|
||||
|
||||
def test_custom_timezone_initialization(self):
|
||||
"""Test initialization with custom timezone"""
|
||||
custom_tz = 'America/New_York'
|
||||
|
||||
with patch('corelibs.datetime_handling.timestamp_strings.datetime') as mock_datetime:
|
||||
mock_now = datetime(2023, 12, 25, 15, 30, 45)
|
||||
mock_datetime.now.return_value = mock_now
|
||||
|
||||
ts = TimestampStrings(time_zone=custom_tz)
|
||||
|
||||
assert ts.time_zone == custom_tz
|
||||
assert ts.timestamp_now == mock_now
|
||||
|
||||
def test_invalid_timezone_raises_error(self):
|
||||
"""Test that invalid timezone raises ValueError"""
|
||||
invalid_tz = 'Invalid/Timezone'
|
||||
|
||||
with pytest.raises(ValueError) as exc_info:
|
||||
TimestampStrings(time_zone=invalid_tz)
|
||||
|
||||
assert 'Zone could not be loaded [Invalid/Timezone]' in str(exc_info.value)
|
||||
|
||||
def test_timestamp_formats(self):
|
||||
"""Test various timestamp format outputs"""
|
||||
with patch('corelibs.datetime_handling.timestamp_strings.datetime') as mock_datetime:
|
||||
# Mock both datetime.now() calls
|
||||
mock_now = datetime(2023, 12, 25, 9, 5, 3)
|
||||
mock_now_tz = datetime(2023, 12, 25, 23, 5, 3, tzinfo=ZoneInfo('Asia/Tokyo'))
|
||||
|
||||
mock_datetime.now.side_effect = [mock_now, mock_now_tz]
|
||||
|
||||
ts = TimestampStrings()
|
||||
|
||||
assert ts.today == '2023-12-25'
|
||||
assert ts.timestamp == '2023-12-25 09:05:03'
|
||||
assert ts.timestamp_file == '2023-12-25_090503'
|
||||
assert 'JST' in ts.timestamp_tz or 'Asia/Tokyo' in ts.timestamp_tz
|
||||
|
||||
def test_different_timezones_produce_different_results(self):
|
||||
"""Test that different timezones produce different timestamp_tz values"""
|
||||
with patch('corelibs.datetime_handling.timestamp_strings.datetime') as mock_datetime:
|
||||
mock_now = datetime(2023, 12, 25, 12, 0, 0)
|
||||
mock_datetime.now.return_value = mock_now
|
||||
|
||||
# Create instances with different timezones
|
||||
ts_tokyo = TimestampStrings(time_zone='Asia/Tokyo')
|
||||
ts_ny = TimestampStrings(time_zone='America/New_York')
|
||||
|
||||
# The timezone-aware timestamps should be different
|
||||
assert ts_tokyo.time_zone != ts_ny.time_zone
|
||||
# Note: The actual timestamp_tz values will depend on the mocked datetime
|
||||
|
||||
def test_class_default_timezone(self):
|
||||
"""Test that class default timezone is correctly set"""
|
||||
assert TimestampStrings.TIME_ZONE == 'Asia/Tokyo'
|
||||
|
||||
def test_none_timezone_uses_default(self):
|
||||
"""Test that passing None for timezone uses class default"""
|
||||
with patch('corelibs.datetime_handling.timestamp_strings.datetime') as mock_datetime:
|
||||
mock_now = datetime(2023, 12, 25, 15, 30, 45)
|
||||
mock_datetime.now.return_value = mock_now
|
||||
|
||||
ts = TimestampStrings(time_zone=None)
|
||||
|
||||
assert ts.time_zone == 'Asia/Tokyo'
|
||||
|
||||
def test_timestamp_file_format_no_colons(self):
|
||||
"""Test that timestamp_file format doesn't contain colons (safe for filenames)"""
|
||||
with patch('corelibs.datetime_handling.timestamp_strings.datetime') as mock_datetime:
|
||||
mock_now = datetime(2023, 12, 25, 15, 30, 45)
|
||||
mock_datetime.now.return_value = mock_now
|
||||
|
||||
ts = TimestampStrings()
|
||||
|
||||
assert ':' not in ts.timestamp_file
|
||||
assert ' ' not in ts.timestamp_file
|
||||
assert ts.timestamp_file == '2023-12-25_153045'
|
||||
|
||||
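
    # Illustrative note (assumption, not the corelibs source): the values
    # asserted above are consistent with strftime patterns like these:
    #   today          -> '%Y-%m-%d'
    #   timestamp      -> '%Y-%m-%d %H:%M:%S'
    #   timestamp_file -> '%Y-%m-%d_%H%M%S' (no colons or spaces, filename-safe)
    @staticmethod
    def _timestamp_file_sketch(moment):
        return moment.strftime('%Y-%m-%d_%H%M%S')
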
    def test_multiple_instances_independent(self):
        """Test that multiple instances don't interfere with each other"""
        with patch('corelibs.datetime_handling.timestamp_strings.datetime') as mock_datetime:
            mock_now = datetime(2023, 12, 25, 15, 30, 45)
            mock_datetime.now.return_value = mock_now

            ts1 = TimestampStrings(time_zone='Asia/Tokyo')
            ts2 = TimestampStrings(time_zone='Europe/London')

            assert ts1.time_zone == 'Asia/Tokyo'
            assert ts2.time_zone == 'Europe/London'
            assert ts1.time_zone != ts2.time_zone

    def test_zoneinfo_called_correctly_with_string(self):
        """Test that ZoneInfo is called with correct timezone when passing string"""
        with patch('corelibs.datetime_handling.timestamp_strings.ZoneInfo') as mock_zoneinfo:
            with patch('corelibs.datetime_handling.timestamp_strings.datetime') as mock_datetime:
                mock_now = datetime(2023, 12, 25, 15, 30, 45)
                mock_datetime.now.return_value = mock_now

                custom_tz = 'Europe/Paris'
                ts = TimestampStrings(time_zone=custom_tz)
                assert ts.time_zone == custom_tz

                mock_zoneinfo.assert_called_with(custom_tz)

    def test_zoneinfo_object_parameter(self):
        """Test that ZoneInfo objects can be passed directly as timezone parameter"""
        with patch('corelibs.datetime_handling.timestamp_strings.datetime') as mock_datetime:
            mock_now = datetime(2023, 12, 25, 15, 30, 45)
            mock_now_tz = datetime(2023, 12, 25, 15, 30, 45, tzinfo=ZoneInfo('Europe/Paris'))
            mock_datetime.now.side_effect = [mock_now, mock_now_tz]

            # Create a ZoneInfo object
            custom_tz_obj = ZoneInfo('Europe/Paris')
            ts = TimestampStrings(time_zone=custom_tz_obj)

            # The time_zone should be the ZoneInfo object itself
            assert ts.time_zone_zi is custom_tz_obj
            assert isinstance(ts.time_zone_zi, ZoneInfo)

    def test_zoneinfo_object_vs_string_equivalence(self):
        """Test that ZoneInfo object and string produce equivalent results"""
        with patch('corelibs.datetime_handling.timestamp_strings.datetime') as mock_datetime:
            mock_now = datetime(2023, 12, 25, 15, 30, 45)
            mock_now_tz = datetime(2023, 12, 25, 15, 30, 45, tzinfo=ZoneInfo('Europe/Paris'))
            mock_datetime.now.side_effect = [mock_now, mock_now_tz, mock_now, mock_now_tz]

            # Test with string
            ts_string = TimestampStrings(time_zone='Europe/Paris')

            # Test with ZoneInfo object
            ts_zoneinfo = TimestampStrings(time_zone=ZoneInfo('Europe/Paris'))

            # Both should produce the same timestamp formats (though time_zone attributes will differ)
            assert ts_string.today == ts_zoneinfo.today
            assert ts_string.timestamp == ts_zoneinfo.timestamp
            assert ts_string.timestamp_file == ts_zoneinfo.timestamp_file

            # The time_zone attributes will be different types but represent the same timezone
            assert str(ts_string.time_zone) == 'Europe/Paris'
            assert isinstance(ts_zoneinfo.time_zone_zi, ZoneInfo)

    def test_edge_case_midnight(self):
        """Test timestamp formatting at midnight"""
        with patch('corelibs.datetime_handling.timestamp_strings.datetime') as mock_datetime:
            mock_now = datetime(2023, 12, 25, 0, 0, 0)
            mock_datetime.now.return_value = mock_now

            ts = TimestampStrings()

            assert ts.timestamp == '2023-12-25 00:00:00'
            assert ts.timestamp_file == '2023-12-25_000000'

    def test_edge_case_new_year(self):
        """Test timestamp formatting at new year"""
        with patch('corelibs.datetime_handling.timestamp_strings.datetime') as mock_datetime:
            mock_now = datetime(2024, 1, 1, 0, 0, 0)
            mock_datetime.now.return_value = mock_now

            ts = TimestampStrings()

            assert ts.today == '2024-01-01'
            assert ts.timestamp == '2024-01-01 00:00:00'

# __END__

3 tests/unit/db_handling/__init__.py Normal file
@@ -0,0 +1,3 @@
"""
db_handling tests
"""

1133 tests/unit/db_handling/test_sqlite_io.py Normal file
File diff suppressed because it is too large

639 tests/unit/debug_handling/test_debug_helpers.py Normal file
@@ -0,0 +1,639 @@
"""
|
||||
Unit tests for debug_handling.debug_helpers module
|
||||
"""
|
||||
|
||||
import sys
|
||||
import pytest
|
||||
|
||||
from corelibs.debug_handling.debug_helpers import (
|
||||
call_stack,
|
||||
exception_stack,
|
||||
OptExcInfo
|
||||
)
|
||||
|
||||
|
||||
class TestCallStack:
|
||||
"""Test cases for call_stack function"""
|
||||
|
||||
def test_call_stack_basic(self):
|
||||
"""Test basic call_stack functionality"""
|
||||
result = call_stack()
|
||||
assert isinstance(result, str)
|
||||
assert "test_debug_helpers.py" in result
|
||||
assert "test_call_stack_basic" in result
|
||||
|
||||
def test_call_stack_with_default_separator(self):
|
||||
"""Test call_stack with default separator"""
|
||||
result = call_stack()
|
||||
assert " -> " in result
|
||||
|
||||
def test_call_stack_with_custom_separator(self):
|
||||
"""Test call_stack with custom separator"""
|
||||
result = call_stack(separator=" | ")
|
||||
assert " | " in result
|
||||
assert " -> " not in result
|
||||
|
||||
def test_call_stack_with_empty_separator(self):
|
||||
"""Test call_stack with empty separator (should default to ' -> ')"""
|
||||
result = call_stack(separator="")
|
||||
assert " -> " in result
|
||||
|
||||
def test_call_stack_format(self):
|
||||
"""Test call_stack output format (filename:function:lineno)"""
|
||||
result = call_stack()
|
||||
parts = result.split(" -> ")
|
||||
for part in parts:
|
||||
# Each part should have format: filename:function:lineno
|
||||
assert part.count(":") >= 2
|
||||
# Most parts should contain .py but some system frames might not
|
||||
# Just check that we have some .py files in the trace
|
||||
assert ".py" in result or "test_debug_helpers" in result
|
||||
|
||||
def test_call_stack_with_start_offset(self):
|
||||
"""Test call_stack with start offset"""
|
||||
result_no_offset = call_stack(start=0)
|
||||
result_with_offset = call_stack(start=2)
|
||||
|
||||
# With offset, we should get fewer frames
|
||||
parts_no_offset = result_no_offset.split(" -> ")
|
||||
parts_with_offset = result_with_offset.split(" -> ")
|
||||
|
||||
assert len(parts_with_offset) <= len(parts_no_offset)
|
||||
|
||||
def test_call_stack_with_skip_last(self):
|
||||
"""Test call_stack with skip_last parameter"""
|
||||
result_skip_default = call_stack(skip_last=-1)
|
||||
result_skip_more = call_stack(skip_last=-3)
|
||||
|
||||
# Skipping more should result in fewer frames
|
||||
parts_default = result_skip_default.split(" -> ")
|
||||
parts_more = result_skip_more.split(" -> ")
|
||||
|
||||
assert len(parts_more) <= len(parts_default)
|
||||
|
||||
def test_call_stack_skip_last_positive_converts_to_negative(self):
|
||||
"""Test that positive skip_last is converted to negative"""
|
||||
# Both should produce same result
|
||||
result_negative = call_stack(skip_last=-2)
|
||||
result_positive = call_stack(skip_last=2)
|
||||
|
||||
assert result_negative == result_positive
|
||||
|
||||
def test_call_stack_nested_calls(self):
|
||||
"""Test call_stack in nested function calls"""
|
||||
def level_one():
|
||||
return level_two()
|
||||
|
||||
def level_two():
|
||||
return level_three()
|
||||
|
||||
def level_three():
|
||||
return call_stack()
|
||||
|
||||
result = level_one()
|
||||
assert "level_one" in result
|
||||
assert "level_two" in result
|
||||
assert "level_three" in result
|
||||
|
||||
def test_call_stack_reset_start_if_empty_false(self):
|
||||
"""Test call_stack with high start value and reset_start_if_empty=False"""
|
||||
# Using a very high start value should result in empty stack
|
||||
result = call_stack(start=1000, reset_start_if_empty=False)
|
||||
assert result == ""
|
||||
|
||||
def test_call_stack_reset_start_if_empty_true(self):
|
||||
"""Test call_stack with high start value and reset_start_if_empty=True"""
|
||||
# Using a very high start value with reset should give non-empty result
|
||||
result = call_stack(start=1000, reset_start_if_empty=True)
|
||||
assert result != ""
|
||||
assert "test_debug_helpers.py" in result
|
||||
|
||||
def test_call_stack_contains_line_numbers(self):
|
||||
"""Test that call_stack includes line numbers"""
|
||||
result = call_stack()
|
||||
# Extract parts and check for numbers
|
||||
parts = result.split(" -> ")
|
||||
for part in parts:
|
||||
# Line numbers should be present (digits at the end)
|
||||
assert any(char.isdigit() for char in part)
|
||||
|
||||
def test_call_stack_separator_none(self):
|
||||
"""Test call_stack with None separator"""
|
||||
result = call_stack(separator="") # Use empty string instead of None
|
||||
# Empty string should be converted to default ' -> '
|
||||
assert " -> " in result
|
||||
|
||||
def test_call_stack_multiple_separators(self):
|
||||
"""Test call_stack with various custom separators"""
|
||||
separators = [" | ", " >> ", " => ", " / ", "\n"]
|
||||
|
||||
for sep in separators:
|
||||
result = call_stack(separator=sep)
|
||||
assert sep in result or result == "" # May be empty based on stack depth
|
||||
|
||||
|
||||
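

# Illustrative sketch only (not the corelibs implementation): behaviour like
# the above can be built on traceback.extract_stack, joining
# 'filename:function:lineno' entries with start/skip_last slicing. The exact
# frame bookkeeping (including reset_start_if_empty) is an assumption here.
import traceback


def _call_stack_sketch(separator=" -> ", start=0, skip_last=-1):
    separator = separator or " -> "   # empty separator falls back to the default
    skip_last = -abs(skip_last)       # positive skip_last is treated as negative
    frames = traceback.extract_stack()[start:skip_last]
    return separator.join(f"{f.filename}:{f.name}:{f.lineno}" for f in frames)

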
class TestExceptionStack:
    """Test cases for exception_stack function"""

    def test_exception_stack_with_active_exception(self):
        """Test exception_stack when an exception is active"""
        try:
            raise ValueError("Test exception")
        except ValueError:
            result = exception_stack()
            assert isinstance(result, str)
            assert "test_debug_helpers.py" in result
            assert "test_exception_stack_with_active_exception" in result

    def test_exception_stack_format(self):
        """Test exception_stack output format"""
        try:
            raise RuntimeError("Test error")
        except RuntimeError:
            result = exception_stack()
            parts = result.split(" -> ")
            for part in parts:
                # Each part should have format: filename:function:lineno
                assert part.count(":") >= 2

    def test_exception_stack_with_custom_separator(self):
        """Test exception_stack with custom separator"""
        def nested_call():
            def inner_call():
                raise TypeError("Test type error")
            inner_call()

        try:
            nested_call()
        except TypeError:
            result = exception_stack(separator=" | ")
            # Only check the separator if there are multiple frames
            if " | " in result or result.count(":") == 2:
                # Single frame or has separator
                assert isinstance(result, str)
                assert " -> " not in result

    def test_exception_stack_with_empty_separator(self):
        """Test exception_stack with empty separator (should default to ' -> ')"""
        def nested_call():
            def inner_call():
                raise KeyError("Test key error")
            inner_call()

        try:
            nested_call()
        except KeyError:
            result = exception_stack(separator="")
            # Should use the default separator if multiple frames exist
            assert isinstance(result, str)

    def test_exception_stack_separator_none(self):
        """Test exception_stack with empty separator"""
        def nested_call():
            def inner_call():
                raise IndexError("Test index error")
            inner_call()

        try:
            nested_call()
        except IndexError:
            result = exception_stack(separator="")  # Use empty string instead of None
            assert isinstance(result, str)

    def test_exception_stack_nested_exceptions(self):
        """Test exception_stack with nested function calls"""
        def level_one():
            level_two()

        def level_two():
            level_three()

        def level_three():
            raise ValueError("Nested exception")

        try:
            level_one()
        except ValueError:
            result = exception_stack()
            # Should contain all levels in the stack
            assert "level_one" in result or "level_two" in result or "level_three" in result

    def test_exception_stack_with_provided_exc_info(self):
        """Test exception_stack with explicitly provided exc_info"""
        try:
            raise AttributeError("Test attribute error")
        except AttributeError:
            exc_info = sys.exc_info()
            result = exception_stack(exc_stack=exc_info)
            assert isinstance(result, str)
            assert len(result) > 0

    def test_exception_stack_no_active_exception(self):
        """Test exception_stack when no exception is active"""
        # This should handle the case gracefully:
        # when no exception is active, sys.exc_info() returns (None, None, None)
        result = exception_stack()
        # With no traceback, should return an empty string or handle gracefully
        assert isinstance(result, str)

    def test_exception_stack_contains_line_numbers(self):
        """Test that exception_stack includes line numbers"""
        try:
            raise OSError("Test OS error")
        except OSError:
            result = exception_stack()
            if result:  # May be empty
                parts = result.split(" -> ")
                for part in parts:
                    # Line numbers should be present
                    assert any(char.isdigit() for char in part)

    def test_exception_stack_multiple_exceptions(self):
        """Test exception_stack captures the current exception only"""
        first_result = None
        second_result = None

        try:
            raise ValueError("First exception")
        except ValueError:
            first_result = exception_stack()

        try:
            raise TypeError("Second exception")
        except TypeError:
            second_result = exception_stack()

        # Both should be valid but may differ
        assert isinstance(first_result, str)
        assert isinstance(second_result, str)

    def test_exception_stack_with_multiple_separators(self):
        """Test exception_stack with various custom separators"""
        separators = [" | ", " >> ", " => ", " / ", "\n"]

        def nested_call():
            def inner_call():
                raise ValueError("Test exception")
            inner_call()

        for sep in separators:
            try:
                nested_call()
            except ValueError:
                result = exception_stack(separator=sep)
                assert isinstance(result, str)
                # Separator only appears if there are multiple frames


class TestOptExcInfo:
    """Test cases for OptExcInfo type definition"""

    def test_opt_exc_info_type_none_tuple(self):
        """Test OptExcInfo can be a None tuple"""
        exc_info: OptExcInfo = (None, None, None)
        assert exc_info == (None, None, None)

    def test_opt_exc_info_type_exception_tuple(self):
        """Test OptExcInfo can be an exception tuple"""
        try:
            raise ValueError("Test")
        except ValueError:
            exc_info: OptExcInfo = sys.exc_info()
            assert exc_info[0] is not None
            assert exc_info[1] is not None
            assert exc_info[2] is not None

    def test_opt_exc_info_with_exception_stack(self):
        """Test that OptExcInfo works with exception_stack function"""
        try:
            raise RuntimeError("Test runtime error")
        except RuntimeError:
            exc_info = sys.exc_info()
            result = exception_stack(exc_stack=exc_info)
            assert isinstance(result, str)


class TestIntegration:
    """Integration tests combining multiple scenarios"""

    def test_call_stack_and_exception_stack_together(self):
        """Test using both call_stack and exception_stack in error handling"""
        def faulty_function():
            _ = call_stack()  # Get call stack before exception
            raise ValueError("Intentional error")

        try:
            faulty_function()
        except ValueError:
            exception_trace = exception_stack()

        assert isinstance(exception_trace, str)
        assert "faulty_function" in exception_trace or "test_debug_helpers.py" in exception_trace

    def test_nested_exception_with_call_stack(self):
        """Test call_stack within exception handling"""
        def outer():
            return inner()

        def inner():
            try:
                raise RuntimeError("Inner error")
            except RuntimeError:
                return {
                    'call_stack': call_stack(),
                    'exception_stack': exception_stack()
                }

        result = outer()
        assert 'call_stack' in result
        assert 'exception_stack' in result
        assert isinstance(result['call_stack'], str)
        assert isinstance(result['exception_stack'], str)

    def test_multiple_nested_levels(self):
        """Test with multiple nested function levels"""
        def level_a():
            return level_b()

        def level_b():
            return level_c()

        def level_c():
            return level_d()

        def level_d():
            try:
                raise ValueError("Deep error")
            except ValueError:
                return {
                    'call': call_stack(),
                    'exception': exception_stack()
                }

        result = level_a()
        # Should contain information about the call chain
        assert result['call']
        assert result['exception']

    def test_different_separators_consistency(self):
        """Test that different separators work consistently"""
        separators = [" -> ", " | ", " / ", " >> "]

        def nested_call():
            def inner_call():
                raise ValueError("Test")
            inner_call()

        for sep in separators:
            try:
                nested_call()
            except ValueError:
                exc_result = exception_stack(separator=sep)
                call_result = call_stack(separator=sep)

                assert isinstance(exc_result, str)
                assert isinstance(call_result, str)
                # Both should be valid strings (separator check only if multiple frames)


class TestEdgeCases:
    """Test edge cases and boundary conditions"""

    def test_call_stack_with_zero_start(self):
        """Test call_stack with start=0 (should include all frames)"""
        result = call_stack(start=0)
        assert isinstance(result, str)
        assert len(result) > 0

    def test_call_stack_with_large_skip_last(self):
        """Test call_stack with very large skip_last value"""
        result = call_stack(skip_last=-100)
        # Should handle gracefully, may be empty
        assert isinstance(result, str)

    def test_exception_stack_none_exc_info(self):
        """Test exception_stack with None as exc_stack"""
        result = exception_stack(exc_stack=None)
        assert isinstance(result, str)

    def test_exception_stack_empty_tuple(self):
        """Test exception_stack with empty exception info"""
        exc_info: OptExcInfo = (None, None, None)
        result = exception_stack(exc_stack=exc_info)
        assert isinstance(result, str)

    def test_call_stack_special_characters_in_separator(self):
        """Test call_stack with special characters in separator"""
        special_separators = ["\n", "\t", "->", "||", "//"]

        for sep in special_separators:
            result = call_stack(separator=sep)
            assert isinstance(result, str)

    def test_very_deep_call_stack(self):
        """Test call_stack with very deep recursion (up to a limit)"""
        def recursive_call(depth: int, max_depth: int = 5) -> str:
            if depth >= max_depth:
                return call_stack()
            return recursive_call(depth + 1, max_depth)

        result = recursive_call(0)
        assert isinstance(result, str)
        # Should contain multiple recursive_call entries
        assert result.count("recursive_call") > 0

    def test_exception_stack_different_exception_types(self):
        """Test exception_stack with various exception types"""
        exception_types = [
            ValueError("value"),
            TypeError("type"),
            KeyError("key"),
            IndexError("index"),
            AttributeError("attr"),
            RuntimeError("runtime"),
        ]

        for exc in exception_types:
            try:
                raise exc
            except (ValueError, TypeError, KeyError, IndexError, AttributeError, RuntimeError):
                result = exception_stack()
                assert isinstance(result, str)


class TestRealWorldScenarios:
    """Test real-world debugging scenarios"""

    def test_debugging_workflow(self):
|
||||
"""Test typical debugging workflow with both functions"""
|
||||
def process_data(data: str) -> str:
|
||||
_ = call_stack() # Capture call stack for debugging
|
||||
if not data:
|
||||
raise ValueError("No data provided")
|
||||
return data.upper()
|
||||
|
||||
# Success case
|
||||
result = process_data("test")
|
||||
assert result == "TEST"
|
||||
|
||||
# Error case
|
||||
try:
|
||||
process_data("")
|
||||
except ValueError:
|
||||
exc_trace = exception_stack()
|
||||
assert isinstance(exc_trace, str)
|
||||
|
||||
def test_logging_context(self):
|
||||
"""Test using call_stack for logging context"""
|
||||
def get_logging_context():
|
||||
return {
|
||||
'timestamp': 'now',
|
||||
'stack': call_stack(start=1, separator=" > "),
|
||||
'function': 'get_logging_context'
|
||||
}
|
||||
|
||||
context = get_logging_context()
|
||||
assert 'stack' in context
|
||||
assert 'timestamp' in context
|
||||
assert isinstance(context['stack'], str)
|
||||
|
||||
def test_error_reporting(self):
|
||||
"""Test comprehensive error reporting"""
|
||||
def dangerous_operation() -> dict[str, str]:
|
||||
try:
|
||||
# Simulate some operation
|
||||
_ = 1 / 0
|
||||
except ZeroDivisionError:
|
||||
return {
|
||||
'error': 'Division by zero',
|
||||
'call_stack': call_stack(),
|
||||
'exception_stack': exception_stack(),
|
||||
}
|
||||
return {} # Fallback return
|
||||
|
||||
error_report = dangerous_operation()
|
||||
assert error_report is not None
|
||||
assert 'error' in error_report
|
||||
assert 'call_stack' in error_report
|
||||
assert 'exception_stack' in error_report
|
||||
assert error_report['error'] == 'Division by zero'
|
||||
|
||||
def test_function_tracing(self):
|
||||
"""Test function call tracing"""
|
||||
traces: list[str] = []
|
||||
|
||||
def traced_function_a() -> str:
|
||||
traces.append(call_stack())
|
||||
return traced_function_b()
|
||||
|
||||
def traced_function_b() -> str:
|
||||
traces.append(call_stack())
|
||||
return traced_function_c()
|
||||
|
||||
def traced_function_c() -> str:
|
||||
traces.append(call_stack())
|
||||
return "done"
|
||||
|
||||
result = traced_function_a()
|
||||
assert result == "done"
|
||||
assert len(traces) == 3
|
||||
# Each trace should be different (different call depths)
|
||||
assert all(isinstance(t, str) for t in traces)
|
||||
|
||||
def test_exception_chain_tracking(self):
|
||||
"""Test tracking exception chains"""
|
||||
exception_traces: list[str] = []
|
||||
|
||||
def operation_one() -> None:
|
||||
try:
|
||||
operation_two()
|
||||
except ValueError:
|
||||
exception_traces.append(exception_stack())
|
||||
raise
|
||||
|
||||
def operation_two() -> None:
|
||||
try:
|
||||
operation_three()
|
||||
except TypeError as exc:
|
||||
exception_traces.append(exception_stack())
|
||||
raise ValueError("Wrapped error") from exc
|
||||
|
||||
def operation_three() -> None:
|
||||
raise TypeError("Original error")
|
||||
|
||||
try:
|
||||
operation_one()
|
||||
except ValueError:
|
||||
exception_traces.append(exception_stack())
|
||||
|
||||
# Should have captured multiple exception stacks
|
||||
assert len(exception_traces) > 0
|
||||
assert all(isinstance(t, str) for t in exception_traces)
|
||||
|
||||
|
||||
class TestParametrized:
|
||||
"""Parametrized tests for comprehensive coverage"""
|
||||
|
||||
@pytest.mark.parametrize("start", [0, 1, 2, 5, 10])
|
||||
def test_call_stack_various_starts(self, start: int) -> None:
|
||||
"""Test call_stack with various start values"""
|
||||
result = call_stack(start=start)
|
||||
assert isinstance(result, str)
|
||||
|
||||
@pytest.mark.parametrize("skip_last", [-1, -2, -3, -5, 1, 2, 3, 5])
|
||||
def test_call_stack_various_skip_lasts(self, skip_last: int) -> None:
|
||||
"""Test call_stack with various skip_last values"""
|
||||
result = call_stack(skip_last=skip_last)
|
||||
assert isinstance(result, str)
|
||||
|
||||
@pytest.mark.parametrize("separator", [" -> ", " | ", " / ", " >> ", " => ", "\n", "\t"])
|
||||
def test_call_stack_various_separators(self, separator: str) -> None:
|
||||
"""Test call_stack with various separators"""
|
||||
result = call_stack(separator=separator)
|
||||
assert isinstance(result, str)
|
||||
if result:
|
||||
assert separator in result
|
||||
|
||||
@pytest.mark.parametrize("reset_start", [True, False])
|
||||
def test_call_stack_reset_start_variations(self, reset_start: bool) -> None:
|
||||
"""Test call_stack with reset_start_if_empty variations"""
|
||||
result = call_stack(start=100, reset_start_if_empty=reset_start)
|
||||
assert isinstance(result, str)
|
||||
if reset_start:
|
||||
assert len(result) > 0 # Should have content after reset
|
||||
else:
|
||||
assert len(result) == 0 # Should be empty
|
||||
|
||||
@pytest.mark.parametrize("separator", [" -> ", " | ", " / ", " >> ", "\n"])
|
||||
def test_exception_stack_various_separators(self, separator: str) -> None:
|
||||
"""Test exception_stack with various separators"""
|
||||
def nested_call():
|
||||
def inner_call():
|
||||
raise ValueError("Test")
|
||||
inner_call()
|
||||
|
||||
try:
|
||||
nested_call()
|
||||
except ValueError:
|
||||
result = exception_stack(separator=separator)
|
||||
assert isinstance(result, str)
|
||||
# Check that result is valid (separator only if multiple frames exist)
|
||||
|
||||
@pytest.mark.parametrize("exception_type", [
|
||||
ValueError,
|
||||
TypeError,
|
||||
KeyError,
|
||||
IndexError,
|
||||
AttributeError,
|
||||
RuntimeError,
|
||||
OSError,
|
||||
])
|
||||
def test_exception_stack_various_exception_types(self, exception_type: type[Exception]) -> None:
|
||||
"""Test exception_stack with various exception types"""
|
||||
try:
|
||||
raise exception_type("Test exception")
|
||||
except (ValueError, TypeError, KeyError, IndexError, AttributeError, RuntimeError, OSError):
|
||||
result = exception_stack()
|
||||
assert isinstance(result, str)
|
||||
|
||||
# __END__
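
The helpers exercised above are never shown in this diff. For orientation, a minimal sketch that would satisfy these assertions; the signatures, the frame-joining format, and in particular the skip_last semantics are guesses inferred from the tests, and the real corelibs implementation may differ:

import sys
import traceback
from types import TracebackType
from typing import Optional, Tuple, Type

# OptExcInfo appears to be the sys.exc_info() triple, possibly all-None
OptExcInfo = Tuple[Optional[Type[BaseException]], Optional[BaseException], Optional[TracebackType]]


def call_stack(start: int = 1, skip_last: int = -1, separator: str = " -> ",
               reset_start_if_empty: bool = False) -> str:
    # 'start' drops outermost frames, 'skip_last' trims frames at the end;
    # both are assumptions based only on the parametrized tests above
    frames = traceback.extract_stack()[start:skip_last]
    if not frames and reset_start_if_empty:
        frames = traceback.extract_stack()[:skip_last]  # fall back to full stack
    return separator.join(f"{frame.name}:{frame.lineno}" for frame in frames)


def exception_stack(separator: str = " -> ", exc_stack: Optional[OptExcInfo] = None) -> str:
    exc_info = exc_stack if exc_stack is not None else sys.exc_info()
    if exc_info[2] is None:  # covers (None, None, None) and "no active exception"
        return ""
    frames = traceback.extract_tb(exc_info[2])
    return separator.join(f"{frame.name}:{frame.lineno}" for frame in frames)
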
288 tests/unit/debug_handling/test_dump_data.py Normal file
@@ -0,0 +1,288 @@
"""
|
||||
Unit tests for debug_handling.dump_data module
|
||||
"""
|
||||
|
||||
import json
|
||||
from datetime import datetime, date
|
||||
from decimal import Decimal
|
||||
from typing import Any
|
||||
|
||||
import pytest
|
||||
|
||||
from corelibs.debug_handling.dump_data import dump_data
|
||||
|
||||
|
||||
class TestDumpData:
|
||||
"""Test cases for dump_data function"""
|
||||
|
||||
def test_dump_simple_dict(self):
|
||||
"""Test dumping a simple dictionary"""
|
||||
data = {"name": "John", "age": 30}
|
||||
result = dump_data(data)
|
||||
|
||||
assert isinstance(result, str)
|
||||
parsed = json.loads(result)
|
||||
assert parsed == data
|
||||
|
||||
def test_dump_simple_list(self):
|
||||
"""Test dumping a simple list"""
|
||||
data = [1, 2, 3, 4, 5]
|
||||
result = dump_data(data)
|
||||
|
||||
assert isinstance(result, str)
|
||||
parsed = json.loads(result)
|
||||
assert parsed == data
|
||||
|
||||
def test_dump_nested_dict(self):
|
||||
"""Test dumping a nested dictionary"""
|
||||
data = {
|
||||
"user": {
|
||||
"name": "Alice",
|
||||
"address": {
|
||||
"city": "Tokyo",
|
||||
"country": "Japan"
|
||||
}
|
||||
}
|
||||
}
|
||||
result = dump_data(data)
|
||||
|
||||
assert isinstance(result, str)
|
||||
parsed = json.loads(result)
|
||||
assert parsed == data
|
||||
|
||||
def test_dump_mixed_types(self):
|
||||
"""Test dumping data with mixed types"""
|
||||
data = {
|
||||
"string": "test",
|
||||
"number": 42,
|
||||
"float": 3.14,
|
||||
"boolean": True,
|
||||
"null": None,
|
||||
"list": [1, 2, 3]
|
||||
}
|
||||
result = dump_data(data)
|
||||
|
||||
assert isinstance(result, str)
|
||||
parsed = json.loads(result)
|
||||
assert parsed == data
|
||||
|
||||
def test_dump_with_indent_default(self):
|
||||
"""Test that indent is applied by default"""
|
||||
data = {"a": 1, "b": 2}
|
||||
result = dump_data(data)
|
||||
|
||||
# With indent, result should contain newlines
|
||||
assert "\n" in result
|
||||
assert " " in result # 4 spaces for indent
|
||||
|
||||
def test_dump_with_indent_true(self):
|
||||
"""Test explicit indent=True"""
|
||||
data = {"a": 1, "b": 2}
|
||||
result = dump_data(data, use_indent=True)
|
||||
|
||||
# With indent, result should contain newlines
|
||||
assert "\n" in result
|
||||
assert " " in result # 4 spaces for indent
|
||||
|
||||
def test_dump_without_indent(self):
|
||||
"""Test dumping without indentation"""
|
||||
data = {"a": 1, "b": 2}
|
||||
result = dump_data(data, use_indent=False)
|
||||
|
||||
# Without indent, result should be compact
|
||||
assert "\n" not in result
|
||||
assert result == '{"a": 1, "b": 2}'
|
||||
|
||||
def test_dump_unicode_characters(self):
|
||||
"""Test that unicode characters are preserved (ensure_ascii=False)"""
|
||||
data = {"message": "こんにちは", "emoji": "😀", "german": "Müller"}
|
||||
result = dump_data(data)
|
||||
|
||||
# Unicode characters should be preserved, not escaped
|
||||
assert "こんにちは" in result
|
||||
assert "😀" in result
|
||||
assert "Müller" in result
|
||||
|
||||
parsed = json.loads(result)
|
||||
assert parsed == data
|
||||
|
||||
def test_dump_datetime_object(self):
|
||||
"""Test dumping data with datetime objects (using default=str)"""
|
||||
now = datetime(2023, 10, 15, 14, 30, 0)
|
||||
data = {"timestamp": now}
|
||||
result = dump_data(data)
|
||||
|
||||
assert isinstance(result, str)
|
||||
# datetime should be converted to string
|
||||
assert "2023-10-15" in result
|
||||
|
||||
def test_dump_date_object(self):
|
||||
"""Test dumping data with date objects"""
|
||||
today = date(2023, 10, 15)
|
||||
data = {"date": today}
|
||||
result = dump_data(data)
|
||||
|
||||
assert isinstance(result, str)
|
||||
assert "2023-10-15" in result
|
||||
|
||||
def test_dump_decimal_object(self):
|
||||
"""Test dumping data with Decimal objects"""
|
||||
data = {"amount": Decimal("123.45")}
|
||||
result = dump_data(data)
|
||||
|
||||
assert isinstance(result, str)
|
||||
assert "123.45" in result
|
||||
|
||||
def test_dump_empty_dict(self):
|
||||
"""Test dumping an empty dictionary"""
|
||||
data = {}
|
||||
result = dump_data(data)
|
||||
|
||||
assert isinstance(result, str)
|
||||
parsed = json.loads(result)
|
||||
assert parsed == {}
|
||||
|
||||
def test_dump_empty_list(self):
|
||||
"""Test dumping an empty list"""
|
||||
data = []
|
||||
result = dump_data(data)
|
||||
|
||||
assert isinstance(result, str)
|
||||
parsed = json.loads(result)
|
||||
assert parsed == []
|
||||
|
||||
def test_dump_string_directly(self):
|
||||
"""Test dumping a string directly"""
|
||||
data = "Hello, World!"
|
||||
result = dump_data(data)
|
||||
|
||||
assert isinstance(result, str)
|
||||
parsed = json.loads(result)
|
||||
assert parsed == data
|
||||
|
||||
def test_dump_number_directly(self):
|
||||
"""Test dumping a number directly"""
|
||||
data = 42
|
||||
result = dump_data(data)
|
||||
|
||||
assert isinstance(result, str)
|
||||
parsed = json.loads(result)
|
||||
assert parsed == data
|
||||
|
||||
def test_dump_boolean_directly(self):
|
||||
"""Test dumping a boolean directly"""
|
||||
data = True
|
||||
result = dump_data(data)
|
||||
|
||||
assert isinstance(result, str)
|
||||
parsed = json.loads(result)
|
||||
assert parsed is True
|
||||
|
||||
def test_dump_none_directly(self):
|
||||
"""Test dumping None directly"""
|
||||
data = None
|
||||
result = dump_data(data)
|
||||
|
||||
assert isinstance(result, str)
|
||||
assert result == "null"
|
||||
parsed = json.loads(result)
|
||||
assert parsed is None
|
||||
|
||||
def test_dump_complex_nested_structure(self):
|
||||
"""Test dumping a complex nested structure"""
|
||||
data = {
|
||||
"users": [
|
||||
{
|
||||
"id": 1,
|
||||
"name": "Alice",
|
||||
"tags": ["admin", "user"],
|
||||
"metadata": {
|
||||
"created": datetime(2023, 1, 1),
|
||||
"active": True
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"name": "Bob",
|
||||
"tags": ["user"],
|
||||
"metadata": {
|
||||
"created": datetime(2023, 6, 15),
|
||||
"active": False
|
||||
}
|
||||
}
|
||||
],
|
||||
"total": 2
|
||||
}
|
||||
result = dump_data(data)
|
||||
|
||||
assert isinstance(result, str)
|
||||
# Check that it's valid JSON
|
||||
parsed = json.loads(result)
|
||||
assert len(parsed["users"]) == 2
|
||||
assert parsed["total"] == 2
|
||||
|
||||
def test_dump_special_characters(self):
|
||||
"""Test dumping data with special characters"""
|
||||
data = {
|
||||
"quote": 'He said "Hello"',
|
||||
"backslash": "path\\to\\file",
|
||||
"newline": "line1\nline2",
|
||||
"tab": "col1\tcol2"
|
||||
}
|
||||
result = dump_data(data)
|
||||
|
||||
assert isinstance(result, str)
|
||||
parsed = json.loads(result)
|
||||
assert parsed == data
|
||||
|
||||
def test_dump_large_numbers(self):
|
||||
"""Test dumping large numbers"""
|
||||
data = {
|
||||
"big_int": 123456789012345678901234567890,
|
||||
"big_float": 1.23456789e100
|
||||
}
|
||||
result = dump_data(data)
|
||||
|
||||
assert isinstance(result, str)
|
||||
parsed = json.loads(result)
|
||||
assert parsed["big_int"] == data["big_int"]
|
||||
|
||||
def test_dump_list_of_dicts(self):
|
||||
"""Test dumping a list of dictionaries"""
|
||||
data = [
|
||||
{"id": 1, "name": "Item 1"},
|
||||
{"id": 2, "name": "Item 2"},
|
||||
{"id": 3, "name": "Item 3"}
|
||||
]
|
||||
result = dump_data(data)
|
||||
|
||||
assert isinstance(result, str)
|
||||
parsed = json.loads(result)
|
||||
assert parsed == data
|
||||
assert len(parsed) == 3
|
||||
|
||||
|
||||
class CustomObject:
|
||||
"""Custom class for testing default=str conversion"""
|
||||
def __init__(self, value: Any):
|
||||
self.value = value
|
||||
|
||||
def __str__(self):
|
||||
return f"CustomObject({self.value})"
|
||||
|
||||
|
||||
class TestDumpDataWithCustomObjects:
|
||||
"""Test cases for dump_data with custom objects"""
|
||||
|
||||
def test_dump_custom_object(self):
|
||||
"""Test that custom objects are converted using str()"""
|
||||
obj = CustomObject("test")
|
||||
data = {"custom": obj}
|
||||
result = dump_data(data)
|
||||
|
||||
assert isinstance(result, str)
|
||||
assert "CustomObject(test)" in result
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
pytest.main([__file__, "-v"])
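
These tests pin dump_data down fairly tightly: a 4-space indent by default, a compact form with use_indent=False, preserved unicode, and str() fallback for datetime, Decimal, and custom objects. A minimal sketch consistent with every assertion above; the real corelibs module may of course differ in details:

import json
from typing import Any


def dump_data(data: Any, use_indent: bool = True) -> str:
    # json.dumps with indent=4 reproduces the "\n" and "    " assertions;
    # indent=None yields exactly '{"a": 1, "b": 2}' for the compact case
    return json.dumps(
        data,
        indent=4 if use_indent else None,
        ensure_ascii=False,  # keep unicode characters readable, not escaped
        default=str,         # fall back to str() for non-JSON-serializable types
    )
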
560 tests/unit/debug_handling/test_profiling.py Normal file
@@ -0,0 +1,560 @@
"""
|
||||
Unit tests for corelibs.debug_handling.profiling module
|
||||
"""
|
||||
|
||||
import time
|
||||
import tracemalloc
|
||||
|
||||
from corelibs.debug_handling.profiling import display_top, Profiling
|
||||
|
||||
|
||||
class TestDisplayTop:
|
||||
"""Test display_top function"""
|
||||
|
||||
def test_display_top_basic(self):
|
||||
"""Test that display_top returns a string with basic stats"""
|
||||
tracemalloc.start()
|
||||
|
||||
# Allocate some memory
|
||||
data = [0] * 10000
|
||||
|
||||
snapshot = tracemalloc.take_snapshot()
|
||||
tracemalloc.stop()
|
||||
|
||||
result = display_top(snapshot)
|
||||
|
||||
assert isinstance(result, str)
|
||||
assert "Top 10 lines" in result
|
||||
assert "KiB" in result
|
||||
assert "Total allocated size:" in result
|
||||
|
||||
# Clean up
|
||||
del data
|
||||
|
||||
def test_display_top_with_custom_limit(self):
|
||||
"""Test display_top with custom limit parameter"""
|
||||
tracemalloc.start()
|
||||
|
||||
# Allocate some memory
|
||||
data = [0] * 10000
|
||||
|
||||
snapshot = tracemalloc.take_snapshot()
|
||||
tracemalloc.stop()
|
||||
|
||||
result = display_top(snapshot, limit=5)
|
||||
|
||||
assert isinstance(result, str)
|
||||
assert "Top 5 lines" in result
|
||||
|
||||
# Clean up
|
||||
del data
|
||||
|
||||
def test_display_top_with_different_key_type(self):
|
||||
"""Test display_top with different key_type parameter"""
|
||||
tracemalloc.start()
|
||||
|
||||
# Allocate some memory
|
||||
data = [0] * 10000
|
||||
|
||||
snapshot = tracemalloc.take_snapshot()
|
||||
tracemalloc.stop()
|
||||
|
||||
result = display_top(snapshot, key_type='filename')
|
||||
|
||||
assert isinstance(result, str)
|
||||
assert "Top 10 lines" in result
|
||||
|
||||
# Clean up
|
||||
del data
|
||||
|
||||
def test_display_top_filters_traces(self):
|
||||
"""Test that display_top filters out bootstrap and unknown traces"""
|
||||
tracemalloc.start()
|
||||
|
||||
# Allocate some memory
|
||||
data = [0] * 10000
|
||||
|
||||
snapshot = tracemalloc.take_snapshot()
|
||||
tracemalloc.stop()
|
||||
|
||||
result = display_top(snapshot)
|
||||
|
||||
# Should not contain filtered traces
|
||||
assert "<frozen importlib._bootstrap>" not in result
|
||||
assert "<unknown>" not in result
|
||||
|
||||
# Clean up
|
||||
del data
|
||||
|
||||
def test_display_top_with_limit_larger_than_stats(self):
|
||||
"""Test display_top when limit is larger than available stats"""
|
||||
tracemalloc.start()
|
||||
|
||||
# Allocate some memory
|
||||
data = [0] * 100
|
||||
|
||||
snapshot = tracemalloc.take_snapshot()
|
||||
tracemalloc.stop()
|
||||
|
||||
result = display_top(snapshot, limit=1000)
|
||||
|
||||
assert isinstance(result, str)
|
||||
assert "Top 1000 lines" in result
|
||||
assert "Total allocated size:" in result
|
||||
|
||||
# Clean up
|
||||
del data
|
||||
|
||||
def test_display_top_empty_snapshot(self):
|
||||
"""Test display_top with a snapshot that has minimal traces"""
|
||||
tracemalloc.start()
|
||||
snapshot = tracemalloc.take_snapshot()
|
||||
tracemalloc.stop()
|
||||
|
||||
result = display_top(snapshot, limit=1)
|
||||
|
||||
assert isinstance(result, str)
|
||||
assert "Top 1 lines" in result
|
||||
|
||||
|
||||
class TestProfilingInitialization:
|
||||
"""Test Profiling class initialization"""
|
||||
|
||||
def test_profiling_initialization(self):
|
||||
"""Test that Profiling initializes correctly"""
|
||||
profiler = Profiling()
|
||||
|
||||
# Should be able to create instance
|
||||
assert isinstance(profiler, Profiling)
|
||||
|
||||
def test_profiling_initial_state(self):
|
||||
"""Test that Profiling starts in a clean state"""
|
||||
profiler = Profiling()
|
||||
|
||||
# Should not raise an error when calling end_profiling
|
||||
# even though start_profiling wasn't called
|
||||
profiler.end_profiling()
|
||||
|
||||
result = profiler.print_profiling()
|
||||
assert isinstance(result, str)
|
||||
|
||||
|
||||
class TestProfilingStartEnd:
|
||||
"""Test start_profiling and end_profiling functionality"""
|
||||
|
||||
def test_start_profiling(self):
|
||||
"""Test that start_profiling can be called"""
|
||||
profiler = Profiling()
|
||||
|
||||
# Should not raise an error
|
||||
profiler.start_profiling("test_operation")
|
||||
|
||||
def test_end_profiling(self):
|
||||
"""Test that end_profiling can be called"""
|
||||
profiler = Profiling()
|
||||
profiler.start_profiling("test_operation")
|
||||
|
||||
# Should not raise an error
|
||||
profiler.end_profiling()
|
||||
|
||||
def test_start_profiling_with_different_idents(self):
|
||||
"""Test start_profiling with different identifier strings"""
|
||||
profiler = Profiling()
|
||||
|
||||
identifiers = ["short", "longer_identifier", "very_long_identifier_with_many_chars"]
|
||||
|
||||
for ident in identifiers:
|
||||
profiler.start_profiling(ident)
|
||||
profiler.end_profiling()
|
||||
result = profiler.print_profiling()
|
||||
|
||||
assert ident in result
|
||||
|
||||
def test_end_profiling_without_start(self):
|
||||
"""Test that end_profiling can be called without start_profiling"""
|
||||
profiler = Profiling()
|
||||
|
||||
# Should not raise an error but internal state should indicate warning
|
||||
profiler.end_profiling()
|
||||
|
||||
result = profiler.print_profiling()
|
||||
assert isinstance(result, str)
|
||||
|
||||
def test_profiling_measures_time(self):
|
||||
"""Test that profiling measures elapsed time"""
|
||||
profiler = Profiling()
|
||||
profiler.start_profiling("time_test")
|
||||
|
||||
sleep_duration = 0.05 # 50ms
|
||||
time.sleep(sleep_duration)
|
||||
|
||||
profiler.end_profiling()
|
||||
result = profiler.print_profiling()
|
||||
|
||||
assert isinstance(result, str)
|
||||
assert "time:" in result
|
||||
# Should have some time measurement
|
||||
assert "ms" in result or "s" in result
|
||||
|
||||
def test_profiling_measures_memory(self):
|
||||
"""Test that profiling measures memory usage"""
|
||||
profiler = Profiling()
|
||||
profiler.start_profiling("memory_test")
|
||||
|
||||
# Allocate some memory
|
||||
data = [0] * 100000
|
||||
|
||||
profiler.end_profiling()
|
||||
result = profiler.print_profiling()
|
||||
|
||||
assert isinstance(result, str)
|
||||
assert "RSS:" in result
|
||||
assert "VMS:" in result
|
||||
assert "time:" in result
|
||||
|
||||
# Clean up
|
||||
del data
|
||||
|
||||
|
||||
class TestProfilingPrintProfiling:
|
||||
"""Test print_profiling functionality"""
|
||||
|
||||
def test_print_profiling_returns_string(self):
|
||||
"""Test that print_profiling returns a string"""
|
||||
profiler = Profiling()
|
||||
profiler.start_profiling("test")
|
||||
profiler.end_profiling()
|
||||
|
||||
result = profiler.print_profiling()
|
||||
|
||||
assert isinstance(result, str)
|
||||
|
||||
def test_print_profiling_contains_identifier(self):
|
||||
"""Test that print_profiling includes the identifier"""
|
||||
profiler = Profiling()
|
||||
identifier = "my_test_operation"
|
||||
|
||||
profiler.start_profiling(identifier)
|
||||
profiler.end_profiling()
|
||||
|
||||
result = profiler.print_profiling()
|
||||
|
||||
assert identifier in result
|
||||
|
||||
def test_print_profiling_format(self):
|
||||
"""Test that print_profiling has expected format"""
|
||||
profiler = Profiling()
|
||||
profiler.start_profiling("test")
|
||||
profiler.end_profiling()
|
||||
|
||||
result = profiler.print_profiling()
|
||||
|
||||
# Check for expected components
|
||||
assert "Profiling:" in result
|
||||
assert "RSS:" in result
|
||||
assert "VMS:" in result
|
||||
assert "time:" in result
|
||||
|
||||
def test_print_profiling_multiple_calls(self):
|
||||
"""Test that print_profiling can be called multiple times"""
|
||||
profiler = Profiling()
|
||||
profiler.start_profiling("test")
|
||||
profiler.end_profiling()
|
||||
|
||||
result1 = profiler.print_profiling()
|
||||
result2 = profiler.print_profiling()
|
||||
|
||||
# Should return the same result
|
||||
assert result1 == result2
|
||||
|
||||
def test_print_profiling_time_formats(self):
|
||||
"""Test different time format outputs"""
|
||||
profiler = Profiling()
|
||||
|
||||
# Very short duration (milliseconds)
|
||||
profiler.start_profiling("ms_test")
|
||||
time.sleep(0.001)
|
||||
profiler.end_profiling()
|
||||
result = profiler.print_profiling()
|
||||
assert "ms" in result
|
||||
|
||||
# Slightly longer duration (seconds)
|
||||
profiler.start_profiling("s_test")
|
||||
time.sleep(0.1)
|
||||
profiler.end_profiling()
|
||||
result = profiler.print_profiling()
|
||||
# Could be ms or s depending on timing
|
||||
assert ("ms" in result or "s" in result)
|
||||
|
||||
def test_print_profiling_memory_formats(self):
|
||||
"""Test different memory format outputs"""
|
||||
profiler = Profiling()
|
||||
profiler.start_profiling("memory_format_test")
|
||||
|
||||
# Allocate some memory
|
||||
data = [0] * 50000
|
||||
|
||||
profiler.end_profiling()
|
||||
result = profiler.print_profiling()
|
||||
|
||||
# Should have some memory unit (B, kB, MB, GB)
|
||||
assert any(unit in result for unit in ["B", "kB", "MB", "GB"])
|
||||
|
||||
# Clean up
|
||||
del data
|
||||
|
||||
|
||||
class TestProfilingIntegration:
|
||||
"""Integration tests for Profiling class"""
|
||||
|
||||
def test_complete_profiling_cycle(self):
|
||||
"""Test a complete profiling cycle from start to print"""
|
||||
profiler = Profiling()
|
||||
|
||||
profiler.start_profiling("complete_cycle")
|
||||
|
||||
# Do some work
|
||||
data = [i for i in range(10000)]
|
||||
time.sleep(0.01)
|
||||
|
||||
profiler.end_profiling()
|
||||
result = profiler.print_profiling()
|
||||
|
||||
assert isinstance(result, str)
|
||||
assert "complete_cycle" in result
|
||||
assert "RSS:" in result
|
||||
assert "VMS:" in result
|
||||
assert "time:" in result
|
||||
|
||||
# Clean up
|
||||
del data
|
||||
|
||||
def test_multiple_profiling_sessions(self):
|
||||
"""Test running multiple profiling sessions"""
|
||||
profiler = Profiling()
|
||||
|
||||
# First session
|
||||
profiler.start_profiling("session_1")
|
||||
time.sleep(0.01)
|
||||
profiler.end_profiling()
|
||||
result1 = profiler.print_profiling()
|
||||
|
||||
# Second session (same profiler instance)
|
||||
profiler.start_profiling("session_2")
|
||||
data = [0] * 100000
|
||||
time.sleep(0.01)
|
||||
profiler.end_profiling()
|
||||
result2 = profiler.print_profiling()
|
||||
|
||||
# Results should be different
|
||||
assert "session_1" in result1
|
||||
assert "session_2" in result2
|
||||
assert result1 != result2
|
||||
|
||||
# Clean up
|
||||
del data
|
||||
|
||||
def test_profiling_with_zero_work(self):
|
||||
"""Test profiling with minimal work"""
|
||||
profiler = Profiling()
|
||||
|
||||
profiler.start_profiling("zero_work")
|
||||
profiler.end_profiling()
|
||||
|
||||
result = profiler.print_profiling()
|
||||
|
||||
assert isinstance(result, str)
|
||||
assert "zero_work" in result
|
||||
|
||||
def test_profiling_with_heavy_computation(self):
|
||||
"""Test profiling with heavier computation"""
|
||||
profiler = Profiling()
|
||||
|
||||
profiler.start_profiling("heavy_computation")
|
||||
|
||||
# Do some computation
|
||||
result_data: list[list[int]] = []
|
||||
for _ in range(1000):
|
||||
result_data.append([j * 2 for j in range(100)])
|
||||
|
||||
time.sleep(0.05)
|
||||
|
||||
profiler.end_profiling()
|
||||
result = profiler.print_profiling()
|
||||
|
||||
assert isinstance(result, str)
|
||||
assert "heavy_computation" in result
|
||||
# Should show measurable time and memory
|
||||
assert "time:" in result
|
||||
|
||||
# Clean up
|
||||
del result_data
|
||||
|
||||
def test_independent_profilers(self):
|
||||
"""Test that multiple Profiling instances are independent"""
|
||||
profiler1 = Profiling()
|
||||
profiler2 = Profiling()
|
||||
|
||||
profiler1.start_profiling("profiler_1")
|
||||
time.sleep(0.01)
|
||||
|
||||
profiler2.start_profiling("profiler_2")
|
||||
data = [0] * 100000
|
||||
time.sleep(0.01)
|
||||
|
||||
profiler1.end_profiling()
|
||||
profiler2.end_profiling()
|
||||
|
||||
result1 = profiler1.print_profiling()
|
||||
result2 = profiler2.print_profiling()
|
||||
|
||||
# Should have different identifiers
|
||||
assert "profiler_1" in result1
|
||||
assert "profiler_2" in result2
|
||||
|
||||
# Results should be different
|
||||
assert result1 != result2
|
||||
|
||||
# Clean up
|
||||
del data
|
||||
|
||||
|
||||
class TestProfilingEdgeCases:
|
||||
"""Test edge cases and boundary conditions"""
|
||||
|
||||
def test_empty_identifier(self):
|
||||
"""Test profiling with empty identifier"""
|
||||
profiler = Profiling()
|
||||
|
||||
profiler.start_profiling("")
|
||||
profiler.end_profiling()
|
||||
|
||||
result = profiler.print_profiling()
|
||||
|
||||
assert isinstance(result, str)
|
||||
assert "Profiling:" in result
|
||||
|
||||
def test_very_long_identifier(self):
|
||||
"""Test profiling with very long identifier"""
|
||||
profiler = Profiling()
|
||||
|
||||
long_ident = "a" * 100
|
||||
|
||||
profiler.start_profiling(long_ident)
|
||||
profiler.end_profiling()
|
||||
|
||||
result = profiler.print_profiling()
|
||||
|
||||
assert isinstance(result, str)
|
||||
assert long_ident in result
|
||||
|
||||
def test_special_characters_in_identifier(self):
|
||||
"""Test profiling with special characters in identifier"""
|
||||
profiler = Profiling()
|
||||
|
||||
special_ident = "test_@#$%_operation"
|
||||
|
||||
profiler.start_profiling(special_ident)
|
||||
profiler.end_profiling()
|
||||
|
||||
result = profiler.print_profiling()
|
||||
|
||||
assert isinstance(result, str)
|
||||
assert special_ident in result
|
||||
|
||||
def test_rapid_consecutive_profiling(self):
|
||||
"""Test rapid consecutive profiling cycles"""
|
||||
profiler = Profiling()
|
||||
|
||||
for i in range(5):
|
||||
profiler.start_profiling(f"rapid_{i}")
|
||||
profiler.end_profiling()
|
||||
result = profiler.print_profiling()
|
||||
|
||||
assert isinstance(result, str)
|
||||
assert f"rapid_{i}" in result
|
||||
|
||||
def test_profiling_negative_memory_change(self):
|
||||
"""Test profiling when memory usage decreases"""
|
||||
profiler = Profiling()
|
||||
|
||||
# Allocate some memory before profiling
|
||||
pre_data = [0] * 1000000
|
||||
|
||||
profiler.start_profiling("memory_decrease")
|
||||
|
||||
# Free the memory
|
||||
del pre_data
|
||||
|
||||
profiler.end_profiling()
|
||||
result = profiler.print_profiling()
|
||||
|
||||
assert isinstance(result, str)
|
||||
assert "memory_decrease" in result
|
||||
# Should handle negative memory change gracefully
|
||||
|
||||
def test_very_short_duration(self):
|
||||
"""Test profiling with extremely short duration"""
|
||||
profiler = Profiling()
|
||||
|
||||
profiler.start_profiling("instant")
|
||||
profiler.end_profiling()
|
||||
|
||||
result = profiler.print_profiling()
|
||||
|
||||
assert isinstance(result, str)
|
||||
assert "instant" in result
|
||||
assert "ms" in result # Should show milliseconds for very short duration
|
||||
|
||||
|
||||
class TestProfilingContextManager:
|
||||
"""Test profiling usage patterns similar to context managers"""
|
||||
|
||||
def test_typical_usage_pattern(self):
|
||||
"""Test typical usage pattern for profiling"""
|
||||
profiler = Profiling()
|
||||
|
||||
# Typical pattern
|
||||
profiler.start_profiling("typical_operation")
|
||||
|
||||
# Perform operation
|
||||
result_list: list[int] = []
|
||||
for _ in range(1000):
|
||||
result_list.append(_ * 2)
|
||||
|
||||
profiler.end_profiling()
|
||||
|
||||
# Get results
|
||||
output = profiler.print_profiling()
|
||||
|
||||
assert isinstance(output, str)
|
||||
assert "typical_operation" in output
|
||||
|
||||
# Clean up
|
||||
del result_list
|
||||
|
||||
def test_profiling_without_end(self):
|
||||
"""Test what happens when end_profiling is not called"""
|
||||
profiler = Profiling()
|
||||
|
||||
profiler.start_profiling("no_end")
|
||||
|
||||
# Don't call end_profiling
|
||||
|
||||
result = profiler.print_profiling()
|
||||
|
||||
# Should still return a string (though data might be incomplete)
|
||||
assert isinstance(result, str)
|
||||
|
||||
def test_profiling_end_without_start(self):
|
||||
"""Test calling end_profiling multiple times without start"""
|
||||
profiler = Profiling()
|
||||
|
||||
profiler.end_profiling()
|
||||
profiler.end_profiling()
|
||||
|
||||
result = profiler.print_profiling()
|
||||
|
||||
assert isinstance(result, str)
|
||||
|
||||
# __END__
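
The display_top asserted here ("Top N lines", KiB units, "Total allocated size:", bootstrap and unknown traces filtered) closely matches the snapshot-summary recipe in the tracemalloc documentation. A sketch of that recipe, adapted to return the text instead of printing it; this is an assumption about the corelibs version, not its confirmed source:

import tracemalloc


def display_top(snapshot: tracemalloc.Snapshot, key_type: str = "lineno", limit: int = 10) -> str:
    # drop importer bookkeeping and unknown frames, as the tests expect
    snapshot = snapshot.filter_traces((
        tracemalloc.Filter(False, "<frozen importlib._bootstrap>"),
        tracemalloc.Filter(False, "<unknown>"),
    ))
    top_stats = snapshot.statistics(key_type)

    lines = [f"Top {limit} lines"]
    for index, stat in enumerate(top_stats[:limit], 1):
        frame = stat.traceback[0]
        lines.append(f"#{index}: {frame.filename}:{frame.lineno}: {stat.size / 1024:.1f} KiB")
    other = top_stats[limit:]
    if other:
        size = sum(stat.size for stat in other)
        lines.append(f"{len(other)} other: {size / 1024:.1f} KiB")
    total = sum(stat.size for stat in top_stats)
    lines.append(f"Total allocated size: {total / 1024:.1f} KiB")
    return "\n".join(lines)
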
405 tests/unit/debug_handling/test_timer.py Normal file
@@ -0,0 +1,405 @@
"""
|
||||
Unit tests for corelibs.debug_handling.timer module
|
||||
"""
|
||||
|
||||
import time
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
from corelibs.debug_handling.timer import Timer
|
||||
|
||||
|
||||
class TestTimerInitialization:
|
||||
"""Test Timer class initialization"""
|
||||
|
||||
def test_timer_initialization(self):
|
||||
"""Test that Timer initializes with correct default values"""
|
||||
timer = Timer()
|
||||
|
||||
# Check that start times are set
|
||||
assert isinstance(timer.get_overall_start_time(), datetime)
|
||||
assert isinstance(timer.get_start_time(), datetime)
|
||||
|
||||
# Check that end times are None
|
||||
assert timer.get_overall_end_time() is None
|
||||
assert timer.get_end_time() is None
|
||||
|
||||
# Check that run times are None
|
||||
assert timer.get_overall_run_time() is None
|
||||
assert timer.get_run_time() is None
|
||||
|
||||
def test_timer_start_times_are_recent(self):
|
||||
"""Test that start times are set to current time on initialization"""
|
||||
before_init = datetime.now()
|
||||
timer = Timer()
|
||||
after_init = datetime.now()
|
||||
|
||||
overall_start = timer.get_overall_start_time()
|
||||
start = timer.get_start_time()
|
||||
|
||||
assert before_init <= overall_start <= after_init
|
||||
assert before_init <= start <= after_init
|
||||
|
||||
def test_timer_start_times_are_same(self):
|
||||
"""Test that overall_start_time and start_time are initialized to the same time"""
|
||||
timer = Timer()
|
||||
|
||||
overall_start = timer.get_overall_start_time()
|
||||
start = timer.get_start_time()
|
||||
|
||||
# They should be very close (within a few microseconds)
|
||||
time_diff = abs((overall_start - start).total_seconds())
|
||||
assert time_diff < 0.001 # Less than 1 millisecond
|
||||
|
||||
|
||||
class TestOverallRunTime:
|
||||
"""Test overall run time functionality"""
|
||||
|
||||
def test_overall_run_time_returns_timedelta(self):
|
||||
"""Test that overall_run_time returns a timedelta object"""
|
||||
timer = Timer()
|
||||
time.sleep(0.01) # Sleep for 10ms
|
||||
|
||||
result = timer.overall_run_time()
|
||||
|
||||
assert isinstance(result, timedelta)
|
||||
|
||||
def test_overall_run_time_sets_end_time(self):
|
||||
"""Test that calling overall_run_time sets the end time"""
|
||||
timer = Timer()
|
||||
|
||||
assert timer.get_overall_end_time() is None
|
||||
|
||||
timer.overall_run_time()
|
||||
|
||||
assert isinstance(timer.get_overall_end_time(), datetime)
|
||||
|
||||
def test_overall_run_time_sets_run_time(self):
|
||||
"""Test that calling overall_run_time sets the run time"""
|
||||
timer = Timer()
|
||||
|
||||
assert timer.get_overall_run_time() is None
|
||||
|
||||
timer.overall_run_time()
|
||||
|
||||
assert isinstance(timer.get_overall_run_time(), timedelta)
|
||||
|
||||
def test_overall_run_time_accuracy(self):
|
||||
"""Test that overall_run_time calculates time difference accurately"""
|
||||
timer = Timer()
|
||||
sleep_duration = 0.05 # 50ms
|
||||
time.sleep(sleep_duration)
|
||||
|
||||
result = timer.overall_run_time()
|
||||
|
||||
# Allow for some variance (10ms tolerance)
|
||||
assert sleep_duration - 0.01 <= result.total_seconds() <= sleep_duration + 0.01
|
||||
|
||||
def test_overall_run_time_multiple_calls(self):
|
||||
"""Test that calling overall_run_time multiple times updates the values"""
|
||||
timer = Timer()
|
||||
time.sleep(0.01)
|
||||
|
||||
first_result = timer.overall_run_time()
|
||||
first_end_time = timer.get_overall_end_time()
|
||||
|
||||
time.sleep(0.01)
|
||||
|
||||
second_result = timer.overall_run_time()
|
||||
second_end_time = timer.get_overall_end_time()
|
||||
|
||||
# Second call should have longer runtime
|
||||
assert second_result > first_result
|
||||
assert second_end_time is not None
|
||||
assert first_end_time is not None
|
||||
# End time should be updated
|
||||
assert second_end_time > first_end_time
|
||||
|
||||
def test_overall_run_time_consistency(self):
|
||||
"""Test that get_overall_run_time returns the same value as overall_run_time"""
|
||||
timer = Timer()
|
||||
time.sleep(0.01)
|
||||
|
||||
calculated_time = timer.overall_run_time()
|
||||
retrieved_time = timer.get_overall_run_time()
|
||||
|
||||
assert calculated_time == retrieved_time
|
||||
|
||||
|
||||
class TestRunTime:
|
||||
"""Test run time functionality"""
|
||||
|
||||
def test_run_time_returns_timedelta(self):
|
||||
"""Test that run_time returns a timedelta object"""
|
||||
timer = Timer()
|
||||
time.sleep(0.01)
|
||||
|
||||
result = timer.run_time()
|
||||
|
||||
assert isinstance(result, timedelta)
|
||||
|
||||
def test_run_time_sets_end_time(self):
|
||||
"""Test that calling run_time sets the end time"""
|
||||
timer = Timer()
|
||||
|
||||
assert timer.get_end_time() is None
|
||||
|
||||
timer.run_time()
|
||||
|
||||
assert isinstance(timer.get_end_time(), datetime)
|
||||
|
||||
def test_run_time_sets_run_time(self):
|
||||
"""Test that calling run_time sets the run time"""
|
||||
timer = Timer()
|
||||
|
||||
assert timer.get_run_time() is None
|
||||
|
||||
timer.run_time()
|
||||
|
||||
assert isinstance(timer.get_run_time(), timedelta)
|
||||
|
||||
def test_run_time_accuracy(self):
|
||||
"""Test that run_time calculates time difference accurately"""
|
||||
timer = Timer()
|
||||
sleep_duration = 0.05 # 50ms
|
||||
time.sleep(sleep_duration)
|
||||
|
||||
result = timer.run_time()
|
||||
|
||||
# Allow for some variance (10ms tolerance)
|
||||
assert sleep_duration - 0.01 <= result.total_seconds() <= sleep_duration + 0.01
|
||||
|
||||
def test_run_time_multiple_calls(self):
|
||||
"""Test that calling run_time multiple times updates the values"""
|
||||
timer = Timer()
|
||||
time.sleep(0.01)
|
||||
|
||||
first_result = timer.run_time()
|
||||
first_end_time = timer.get_end_time()
|
||||
|
||||
time.sleep(0.01)
|
||||
|
||||
second_result = timer.run_time()
|
||||
second_end_time = timer.get_end_time()
|
||||
|
||||
# Second call should have longer runtime
|
||||
assert second_result > first_result
|
||||
assert second_end_time is not None
|
||||
assert first_end_time is not None
|
||||
# End time should be updated
|
||||
assert second_end_time > first_end_time
|
||||
|
||||
def test_run_time_consistency(self):
|
||||
"""Test that get_run_time returns the same value as run_time"""
|
||||
timer = Timer()
|
||||
time.sleep(0.01)
|
||||
|
||||
calculated_time = timer.run_time()
|
||||
retrieved_time = timer.get_run_time()
|
||||
|
||||
assert calculated_time == retrieved_time
|
||||
|
||||
|
||||
class TestResetRunTime:
|
||||
"""Test reset_run_time functionality"""
|
||||
|
||||
def test_reset_run_time_resets_start_time(self):
|
||||
"""Test that reset_run_time updates the start time"""
|
||||
timer = Timer()
|
||||
original_start = timer.get_start_time()
|
||||
|
||||
time.sleep(0.02)
|
||||
timer.reset_run_time()
|
||||
|
||||
new_start = timer.get_start_time()
|
||||
|
||||
assert new_start > original_start
|
||||
|
||||
def test_reset_run_time_clears_end_time(self):
|
||||
"""Test that reset_run_time clears the end time"""
|
||||
timer = Timer()
|
||||
timer.run_time()
|
||||
|
||||
assert timer.get_end_time() is not None
|
||||
|
||||
timer.reset_run_time()
|
||||
|
||||
assert timer.get_end_time() is None
|
||||
|
||||
def test_reset_run_time_clears_run_time(self):
|
||||
"""Test that reset_run_time clears the run time"""
|
||||
timer = Timer()
|
||||
timer.run_time()
|
||||
|
||||
assert timer.get_run_time() is not None
|
||||
|
||||
timer.reset_run_time()
|
||||
|
||||
assert timer.get_run_time() is None
|
||||
|
||||
def test_reset_run_time_does_not_affect_overall_times(self):
|
||||
"""Test that reset_run_time does not affect overall times"""
|
||||
timer = Timer()
|
||||
|
||||
overall_start = timer.get_overall_start_time()
|
||||
timer.overall_run_time()
|
||||
overall_end = timer.get_overall_end_time()
|
||||
overall_run = timer.get_overall_run_time()
|
||||
|
||||
timer.reset_run_time()
|
||||
|
||||
# Overall times should remain unchanged
|
||||
assert timer.get_overall_start_time() == overall_start
|
||||
assert timer.get_overall_end_time() == overall_end
|
||||
assert timer.get_overall_run_time() == overall_run
|
||||
|
||||
def test_reset_run_time_allows_new_measurement(self):
|
||||
"""Test that reset_run_time allows for new time measurements"""
|
||||
timer = Timer()
|
||||
time.sleep(0.02)
|
||||
timer.run_time()
|
||||
|
||||
first_run_time = timer.get_run_time()
|
||||
|
||||
timer.reset_run_time()
|
||||
time.sleep(0.01)
|
||||
timer.run_time()
|
||||
|
||||
second_run_time = timer.get_run_time()
|
||||
|
||||
assert second_run_time is not None
|
||||
assert first_run_time is not None
|
||||
# Second measurement should be shorter since we reset
|
||||
assert second_run_time < first_run_time
|
||||
|
||||
|
||||
class TestTimerIntegration:
|
||||
"""Integration tests for Timer class"""
|
||||
|
||||
def test_independent_timers(self):
|
||||
"""Test that multiple Timer instances are independent"""
|
||||
timer1 = Timer()
|
||||
time.sleep(0.01)
|
||||
timer2 = Timer()
|
||||
|
||||
# timer1 should have earlier start time
|
||||
assert timer1.get_start_time() < timer2.get_start_time()
|
||||
assert timer1.get_overall_start_time() < timer2.get_overall_start_time()
|
||||
|
||||
def test_overall_and_run_time_independence(self):
|
||||
"""Test that overall time and run time are independent"""
|
||||
timer = Timer()
|
||||
time.sleep(0.02)
|
||||
|
||||
# Reset run time but not overall
|
||||
timer.reset_run_time()
|
||||
time.sleep(0.01)
|
||||
|
||||
run_time = timer.run_time()
|
||||
overall_time = timer.overall_run_time()
|
||||
|
||||
# Overall time should be longer than run time
|
||||
assert overall_time > run_time
|
||||
|
||||
def test_typical_usage_pattern(self):
|
||||
"""Test a typical usage pattern of the Timer class"""
|
||||
timer = Timer()
|
||||
|
||||
# Measure first operation
|
||||
time.sleep(0.01)
|
||||
first_operation = timer.run_time()
|
||||
assert first_operation.total_seconds() > 0
|
||||
|
||||
# Reset and measure second operation
|
||||
timer.reset_run_time()
|
||||
time.sleep(0.01)
|
||||
second_operation = timer.run_time()
|
||||
assert second_operation.total_seconds() > 0
|
||||
|
||||
# Get overall time
|
||||
overall = timer.overall_run_time()
|
||||
|
||||
# Overall should be greater than individual operations
|
||||
assert overall > first_operation
|
||||
assert overall > second_operation
|
||||
|
||||
def test_zero_sleep_timer(self):
|
||||
"""Test timer with minimal sleep (edge case)"""
|
||||
timer = Timer()
|
||||
|
||||
# Call run_time immediately
|
||||
result = timer.run_time()
|
||||
|
||||
# Should still return a valid timedelta (very small)
|
||||
assert isinstance(result, timedelta)
|
||||
assert result.total_seconds() >= 0
|
||||
|
||||
def test_getter_methods_before_calculation(self):
|
||||
"""Test that getter methods return None before calculation methods are called"""
|
||||
timer = Timer()
|
||||
|
||||
# Before calling run_time()
|
||||
assert timer.get_end_time() is None
|
||||
assert timer.get_run_time() is None
|
||||
|
||||
# Before calling overall_run_time()
|
||||
assert timer.get_overall_end_time() is None
|
||||
assert timer.get_overall_run_time() is None
|
||||
|
||||
# But start times should always be set
|
||||
assert timer.get_start_time() is not None
|
||||
assert timer.get_overall_start_time() is not None
|
||||
|
||||
|
||||
class TestTimerEdgeCases:
|
||||
"""Test edge cases and boundary conditions"""
|
||||
|
||||
def test_rapid_consecutive_calls(self):
|
||||
"""Test rapid consecutive calls to run_time"""
|
||||
timer = Timer()
|
||||
|
||||
results: list[timedelta] = []
|
||||
for _ in range(5):
|
||||
results.append(timer.run_time())
|
||||
|
||||
# Each result should be greater than or equal to the previous
|
||||
for i in range(1, len(results)):
|
||||
assert results[i] >= results[i - 1]
|
||||
|
||||
def test_very_short_duration(self):
|
||||
"""Test timer with very short duration"""
|
||||
timer = Timer()
|
||||
result = timer.run_time()
|
||||
|
||||
# Should be a very small positive timedelta
|
||||
assert isinstance(result, timedelta)
|
||||
assert result.total_seconds() >= 0
|
||||
assert result.total_seconds() < 0.1 # Less than 100ms
|
||||
|
||||
def test_reset_multiple_times(self):
|
||||
"""Test resetting the timer multiple times"""
|
||||
timer = Timer()
|
||||
|
||||
for _ in range(3):
|
||||
timer.reset_run_time()
|
||||
time.sleep(0.01)
|
||||
result = timer.run_time()
|
||||
|
||||
assert isinstance(result, timedelta)
|
||||
assert result.total_seconds() > 0
|
||||
|
||||
def test_overall_time_persists_through_resets(self):
|
||||
"""Test that overall time continues even when run_time is reset"""
|
||||
timer = Timer()
|
||||
|
||||
time.sleep(0.01)
|
||||
timer.reset_run_time()
|
||||
|
||||
time.sleep(0.01)
|
||||
timer.reset_run_time()
|
||||
|
||||
overall = timer.overall_run_time()
|
||||
|
||||
# Overall time should reflect total elapsed time
|
||||
assert overall.total_seconds() >= 0.02
|
||||
|
||||
# __END__
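
The Timer contract these tests fix is simple: two clocks sharing one constructor timestamp, where reset_run_time() restarts only the per-measurement clock and getters return None until the matching calculation ran. A minimal sketch that would satisfy every assertion above (a hypothetical reconstruction, not the actual corelibs source):

from datetime import datetime, timedelta
from typing import Optional


class Timer:
    """Hypothetical reconstruction of the Timer interface exercised above."""

    def __init__(self) -> None:
        now = datetime.now()
        self._overall_start = now  # never reset
        self._start = now          # restarted by reset_run_time()
        self._end: Optional[datetime] = None
        self._run: Optional[timedelta] = None
        self._overall_end: Optional[datetime] = None
        self._overall_run: Optional[timedelta] = None

    def run_time(self) -> timedelta:
        self._end = datetime.now()
        self._run = self._end - self._start
        return self._run

    def overall_run_time(self) -> timedelta:
        self._overall_end = datetime.now()
        self._overall_run = self._overall_end - self._overall_start
        return self._overall_run

    def reset_run_time(self) -> None:
        # only the per-measurement clock restarts; overall values are kept
        self._start = datetime.now()
        self._end = None
        self._run = None

    def get_overall_start_time(self) -> datetime:
        return self._overall_start

    def get_start_time(self) -> datetime:
        return self._start

    def get_end_time(self) -> Optional[datetime]:
        return self._end

    def get_run_time(self) -> Optional[timedelta]:
        return self._run

    def get_overall_end_time(self) -> Optional[datetime]:
        return self._overall_end

    def get_overall_run_time(self) -> Optional[timedelta]:
        return self._overall_run
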
975 tests/unit/debug_handling/test_writeline.py Normal file
@@ -0,0 +1,975 @@
"""
|
||||
Unit tests for debug_handling.writeline module
|
||||
"""
|
||||
|
||||
import io
|
||||
import pytest
|
||||
from pytest import CaptureFixture
|
||||
|
||||
from corelibs.debug_handling.writeline import (
|
||||
write_l,
|
||||
pr_header,
|
||||
pr_title,
|
||||
pr_open,
|
||||
pr_close,
|
||||
pr_act
|
||||
)
|
||||
|
||||
|
||||
class TestWriteL:
|
||||
"""Test cases for write_l function"""
|
||||
|
||||
def test_write_l_print_only(self, capsys: CaptureFixture[str]):
|
||||
"""Test write_l with print_line=True and no file"""
|
||||
write_l("Test line", print_line=True)
|
||||
captured = capsys.readouterr()
|
||||
assert captured.out == "Test line\n"
|
||||
|
||||
def test_write_l_no_print_no_file(self, capsys: CaptureFixture[str]):
|
||||
"""Test write_l with print_line=False and no file (should do nothing)"""
|
||||
write_l("Test line", print_line=False)
|
||||
captured = capsys.readouterr()
|
||||
assert captured.out == ""
|
||||
|
||||
def test_write_l_file_only(self, capsys: CaptureFixture[str]):
|
||||
"""Test write_l with file handler only (no print)"""
|
||||
fpl = io.StringIO()
|
||||
write_l("Test line", fpl=fpl, print_line=False)
|
||||
captured = capsys.readouterr()
|
||||
assert captured.out == ""
|
||||
assert fpl.getvalue() == "Test line\n"
|
||||
fpl.close()
|
||||
|
||||
def test_write_l_both_print_and_file(self, capsys: CaptureFixture[str]):
|
||||
"""Test write_l with both print and file output"""
|
||||
fpl = io.StringIO()
|
||||
write_l("Test line", fpl=fpl, print_line=True)
|
||||
captured = capsys.readouterr()
|
||||
assert captured.out == "Test line\n"
|
||||
assert fpl.getvalue() == "Test line\n"
|
||||
fpl.close()
|
||||
|
||||
def test_write_l_multiple_lines_to_file(self):
|
||||
"""Test write_l writing multiple lines to file"""
|
||||
fpl = io.StringIO()
|
||||
write_l("Line 1", fpl=fpl, print_line=False)
|
||||
write_l("Line 2", fpl=fpl, print_line=False)
|
||||
write_l("Line 3", fpl=fpl, print_line=False)
|
||||
assert fpl.getvalue() == "Line 1\nLine 2\nLine 3\n"
|
||||
fpl.close()
|
||||
|
||||
def test_write_l_empty_string(self, capsys: CaptureFixture[str]):
|
||||
"""Test write_l with empty string"""
|
||||
fpl = io.StringIO()
|
||||
write_l("", fpl=fpl, print_line=True)
|
||||
captured = capsys.readouterr()
|
||||
assert captured.out == "\n"
|
||||
assert fpl.getvalue() == "\n"
|
||||
fpl.close()
|
||||
|
||||
def test_write_l_special_characters(self):
|
||||
"""Test write_l with special characters"""
|
||||
fpl = io.StringIO()
|
||||
special_line = "Special: \t\n\r\\ 特殊文字 €"
|
||||
write_l(special_line, fpl=fpl, print_line=False)
|
||||
assert special_line + "\n" in fpl.getvalue()
|
||||
fpl.close()
|
||||
|
||||
def test_write_l_long_string(self):
|
||||
"""Test write_l with long string"""
|
||||
fpl = io.StringIO()
|
||||
long_line = "A" * 1000
|
||||
write_l(long_line, fpl=fpl, print_line=False)
|
||||
assert fpl.getvalue() == long_line + "\n"
|
||||
fpl.close()
|
||||
|
||||
def test_write_l_unicode_content(self):
|
||||
"""Test write_l with unicode content"""
|
||||
fpl = io.StringIO()
|
||||
unicode_line = "Hello 世界 🌍 Привет"
|
||||
write_l(unicode_line, fpl=fpl, print_line=False)
|
||||
assert fpl.getvalue() == unicode_line + "\n"
|
||||
fpl.close()
|
||||
|
||||
def test_write_l_default_parameters(self, capsys: CaptureFixture[str]):
|
||||
"""Test write_l with default parameters"""
|
||||
write_l("Test")
|
||||
captured = capsys.readouterr()
|
||||
# Default print_line is False
|
||||
assert captured.out == ""
|
||||
|
||||
def test_write_l_with_newline_in_string(self):
|
||||
"""Test write_l with newline characters in the string"""
|
||||
fpl = io.StringIO()
|
||||
write_l("Line with\nnewline", fpl=fpl, print_line=False)
|
||||
assert fpl.getvalue() == "Line with\nnewline\n"
|
||||
fpl.close()
|
||||
|
||||
|
||||
class TestPrHeader:
|
||||
"""Test cases for pr_header function"""
|
||||
|
||||
def test_pr_header_default(self, capsys: CaptureFixture[str]):
|
||||
"""Test pr_header with default parameters"""
|
||||
pr_header("TEST")
|
||||
captured = capsys.readouterr()
|
||||
assert "#" in captured.out
|
||||
assert "TEST" in captured.out
|
||||
|
||||
def test_pr_header_custom_marker(self, capsys: CaptureFixture[str]):
|
||||
"""Test pr_header with custom marker string"""
|
||||
pr_header("TEST", marker_string="*")
|
||||
captured = capsys.readouterr()
|
||||
assert "*" in captured.out
|
||||
assert "TEST" in captured.out
|
||||
assert "#" not in captured.out
|
||||
|
||||
def test_pr_header_custom_width(self, capsys: CaptureFixture[str]):
|
||||
"""Test pr_header with custom width"""
|
||||
pr_header("TEST", width=50)
|
||||
captured = capsys.readouterr()
|
||||
# Check that output is formatted
|
||||
assert "TEST" in captured.out
|
||||
|
||||
def test_pr_header_short_tag(self, capsys: CaptureFixture[str]):
|
||||
"""Test pr_header with short tag"""
|
||||
pr_header("X")
|
||||
captured = capsys.readouterr()
|
||||
assert "X" in captured.out
|
||||
assert "#" in captured.out
|
||||
|
||||
def test_pr_header_long_tag(self, capsys: CaptureFixture[str]):
|
||||
"""Test pr_header with long tag"""
|
||||
pr_header("This is a very long header tag")
|
||||
captured = capsys.readouterr()
|
||||
assert "This is a very long header tag" in captured.out
|
||||
|
||||
def test_pr_header_empty_tag(self, capsys: CaptureFixture[str]):
|
||||
"""Test pr_header with empty tag"""
|
||||
pr_header("")
|
||||
captured = capsys.readouterr()
|
||||
assert "#" in captured.out
|
||||
|
||||
def test_pr_header_special_characters(self, capsys: CaptureFixture[str]):
|
||||
"""Test pr_header with special characters in tag"""
|
||||
pr_header("TEST: 123! @#$")
|
||||
captured = capsys.readouterr()
|
||||
assert "TEST: 123! @#$" in captured.out
|
||||
|
||||
def test_pr_header_unicode(self, capsys: CaptureFixture[str]):
|
||||
"""Test pr_header with unicode characters"""
|
||||
pr_header("テスト 🎉")
|
||||
captured = capsys.readouterr()
|
||||
assert "テスト 🎉" in captured.out
|
||||
|
||||
def test_pr_header_various_markers(self, capsys: CaptureFixture[str]):
|
||||
"""Test pr_header with various marker strings"""
|
||||
markers = ["*", "=", "-", "+", "~", "@"]
|
||||
for marker in markers:
|
||||
pr_header("TEST", marker_string=marker)
|
||||
captured = capsys.readouterr()
|
||||
assert marker in captured.out
|
||||
assert "TEST" in captured.out
|
||||
|
||||
def test_pr_header_zero_width(self, capsys: CaptureFixture[str]):
|
||||
"""Test pr_header with width of 0"""
|
||||
pr_header("TEST", width=0)
|
||||
captured = capsys.readouterr()
|
||||
assert "TEST" in captured.out
|
||||
|
||||
def test_pr_header_large_width(self, capsys: CaptureFixture[str]):
|
||||
"""Test pr_header with large width"""
|
||||
pr_header("TEST", width=100)
|
||||
captured = capsys.readouterr()
|
||||
assert "TEST" in captured.out
|
||||
assert "#" in captured.out
|
||||
|
||||
def test_pr_header_format(self, capsys: CaptureFixture[str]):
|
||||
"""Test pr_header output format"""
|
||||
pr_header("CENTER", marker_string="#", width=20)
|
||||
captured = capsys.readouterr()
|
||||
# Should have spaces around centered text
|
||||
assert " CENTER " in captured.out or "CENTER" in captured.out
|
||||
|
||||
|
||||
class TestPrTitle:
    """Test cases for pr_title function"""

    def test_pr_title_default(self, capsys: CaptureFixture[str]):
        """Test pr_title with default parameters"""
        pr_title("Test Title")
        captured = capsys.readouterr()
        assert "Test Title" in captured.out
        assert "|" in captured.out
        assert "." in captured.out
        assert ":" in captured.out

    def test_pr_title_custom_prefix(self, capsys: CaptureFixture[str]):
        """Test pr_title with custom prefix string"""
        pr_title("Test", prefix_string=">")
        captured = capsys.readouterr()
        assert ">" in captured.out
        assert "Test" in captured.out
        assert "|" not in captured.out

    def test_pr_title_custom_space_filler(self, capsys: CaptureFixture[str]):
        """Test pr_title with custom space filler"""
        pr_title("Test", space_filler="-")
        captured = capsys.readouterr()
        assert "Test" in captured.out
        assert "-" in captured.out
        assert "." not in captured.out

    def test_pr_title_custom_width(self, capsys: CaptureFixture[str]):
        """Test pr_title with custom width"""
        pr_title("Test", width=50)
        captured = capsys.readouterr()
        assert "Test" in captured.out

    def test_pr_title_short_tag(self, capsys: CaptureFixture[str]):
        """Test pr_title with short tag"""
        pr_title("X")
        captured = capsys.readouterr()
        assert "X" in captured.out
        assert "." in captured.out

    def test_pr_title_long_tag(self, capsys: CaptureFixture[str]):
        """Test pr_title with long tag"""
        pr_title("This is a very long title tag")
        captured = capsys.readouterr()
        assert "This is a very long title tag" in captured.out

    def test_pr_title_empty_tag(self, capsys: CaptureFixture[str]):
        """Test pr_title with empty tag"""
        pr_title("")
        captured = capsys.readouterr()
        assert "|" in captured.out
        assert ":" in captured.out

    def test_pr_title_special_characters(self, capsys: CaptureFixture[str]):
        """Test pr_title with special characters"""
        pr_title("Task #123!")
        captured = capsys.readouterr()
        assert "Task #123!" in captured.out

    def test_pr_title_unicode(self, capsys: CaptureFixture[str]):
        """Test pr_title with unicode characters"""
        pr_title("タイトル 📝")
        captured = capsys.readouterr()
        assert "タイトル 📝" in captured.out

    def test_pr_title_various_fillers(self, capsys: CaptureFixture[str]):
        """Test pr_title with various space fillers"""
        fillers = [".", "-", "_", "*", " ", "~"]
        for filler in fillers:
            pr_title("Test", space_filler=filler)
            captured = capsys.readouterr()
            assert "Test" in captured.out

    def test_pr_title_zero_width(self, capsys: CaptureFixture[str]):
        """Test pr_title with width of 0"""
        pr_title("Test", width=0)
        captured = capsys.readouterr()
        assert "Test" in captured.out

    def test_pr_title_large_width(self, capsys: CaptureFixture[str]):
        """Test pr_title with large width"""
        pr_title("Test", width=100)
        captured = capsys.readouterr()
        assert "Test" in captured.out

    def test_pr_title_format_left_align(self, capsys: CaptureFixture[str]):
        """Test pr_title output format (should be left-aligned with filler)"""
        pr_title("Start", space_filler=".", width=10)
        captured = capsys.readouterr()
        # Should have the tag followed by dots
        assert "Start" in captured.out
        assert ":" in captured.out


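# --- Reference sketch (assumption, not the shipped source) ---
# A minimal pr_title consistent with the assertions above: prefix the tag,
# left-align it, pad with space_filler, and terminate with ":". The
# prefix_string and space_filler defaults are taken from the tests; the
# width default is a guess:
#
#     def pr_title(tag: str, prefix_string: str = "|",
#                  space_filler: str = ".", width: int = 35) -> None:
#         if width < 0:
#             raise ValueError("width must be non-negative")
#         print(f"{prefix_string} {tag} ".ljust(width, space_filler) + ":")

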
class TestPrOpen:
    """Test cases for pr_open function"""

    def test_pr_open_default(self, capsys: CaptureFixture[str]):
        """Test pr_open with default parameters"""
        pr_open("Processing")
        captured = capsys.readouterr()
        assert "Processing" in captured.out
        assert "|" in captured.out
        assert "." in captured.out
        assert "[" in captured.out
        # Should not have newline at the end
        assert not captured.out.endswith("\n")

    def test_pr_open_custom_prefix(self, capsys: CaptureFixture[str]):
        """Test pr_open with custom prefix string"""
        pr_open("Task", prefix_string=">")
        captured = capsys.readouterr()
        assert ">" in captured.out
        assert "Task" in captured.out
        assert "|" not in captured.out

    def test_pr_open_custom_space_filler(self, capsys: CaptureFixture[str]):
        """Test pr_open with custom space filler"""
        pr_open("Task", space_filler="-")
        captured = capsys.readouterr()
        assert "Task" in captured.out
        assert "-" in captured.out
        assert "." not in captured.out

    def test_pr_open_custom_width(self, capsys: CaptureFixture[str]):
        """Test pr_open with custom width"""
        pr_open("Task", width=50)
        captured = capsys.readouterr()
        assert "Task" in captured.out
        assert "[" in captured.out

    def test_pr_open_short_tag(self, capsys: CaptureFixture[str]):
        """Test pr_open with short tag"""
        pr_open("X")
        captured = capsys.readouterr()
        assert "X" in captured.out
        assert "[" in captured.out

    def test_pr_open_long_tag(self, capsys: CaptureFixture[str]):
        """Test pr_open with long tag"""
        pr_open("This is a very long task tag")
        captured = capsys.readouterr()
        assert "This is a very long task tag" in captured.out

    def test_pr_open_empty_tag(self, capsys: CaptureFixture[str]):
        """Test pr_open with empty tag"""
        pr_open("")
        captured = capsys.readouterr()
        assert "[" in captured.out
        assert "|" in captured.out

    def test_pr_open_no_newline(self, capsys: CaptureFixture[str]):
        """Test pr_open doesn't end with newline"""
        pr_open("Test")
        captured = capsys.readouterr()
        # Output should not end with newline (uses end="")
        assert not captured.out.endswith("\n")

    def test_pr_open_special_characters(self, capsys: CaptureFixture[str]):
        """Test pr_open with special characters"""
        pr_open("Loading: 50%")
        captured = capsys.readouterr()
        assert "Loading: 50%" in captured.out

    def test_pr_open_unicode(self, capsys: CaptureFixture[str]):
        """Test pr_open with unicode characters"""
        pr_open("処理中 ⏳")
        captured = capsys.readouterr()
        assert "処理中 ⏳" in captured.out

    def test_pr_open_format(self, capsys: CaptureFixture[str]):
        """Test pr_open output format"""
        pr_open("Task", prefix_string="|", space_filler=".", width=20)
        captured = capsys.readouterr()
        assert "|" in captured.out
        assert "Task" in captured.out
        assert "[" in captured.out


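# --- Reference sketch (assumption, not the shipped source) ---
# pr_open behaves like pr_title but opens a "[" progress bracket and keeps
# the cursor on the same line (end=""), which is what the "no trailing
# newline" assertions above verify:
#
#     def pr_open(tag: str, prefix_string: str = "|",
#                 space_filler: str = ".", width: int = 35) -> None:
#         if width < 0:
#             raise ValueError("width must be non-negative")
#         print(f"{prefix_string} {tag} ".ljust(width, space_filler) + " [",
#               end="")

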
class TestPrClose:
    """Test cases for pr_close function"""

    def test_pr_close_default(self, capsys: CaptureFixture[str]):
        """Test pr_close with default (empty) tag"""
        pr_close()
        captured = capsys.readouterr()
        assert captured.out == "]\n"

    def test_pr_close_with_tag(self, capsys: CaptureFixture[str]):
        """Test pr_close with custom tag"""
        pr_close("DONE")
        captured = capsys.readouterr()
        assert "DONE" in captured.out
        assert "]" in captured.out
        assert captured.out.endswith("\n")

    def test_pr_close_with_space(self, capsys: CaptureFixture[str]):
        """Test pr_close with space in tag"""
        pr_close(" OK ")
        captured = capsys.readouterr()
        assert " OK " in captured.out
        assert "]" in captured.out

    def test_pr_close_empty_string(self, capsys: CaptureFixture[str]):
        """Test pr_close with empty string (same as default)"""
        pr_close("")
        captured = capsys.readouterr()
        assert captured.out == "]\n"

    def test_pr_close_special_characters(self, capsys: CaptureFixture[str]):
        """Test pr_close with special characters"""
        pr_close("✓")
        captured = capsys.readouterr()
        assert "✓" in captured.out
        assert "]" in captured.out

    def test_pr_close_unicode(self, capsys: CaptureFixture[str]):
        """Test pr_close with unicode characters"""
        pr_close("完了")
        captured = capsys.readouterr()
        assert "完了" in captured.out
        assert "]" in captured.out

    def test_pr_close_newline(self, capsys: CaptureFixture[str]):
        """Test pr_close ends with newline"""
        pr_close("OK")
        captured = capsys.readouterr()
        assert captured.out.endswith("\n")

    def test_pr_close_various_tags(self, capsys: CaptureFixture[str]):
        """Test pr_close with various tags"""
        tags = ["OK", "DONE", "✓", "✗", "SKIP", "PASS", "FAIL"]
        for tag in tags:
            pr_close(tag)
            captured = capsys.readouterr()
            assert tag in captured.out
            assert "]" in captured.out


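# --- Reference sketch (assumption, not the shipped source) ---
# The exact-output assertion above (captured.out == "]\n") pins pr_close
# down almost completely:
#
#     def pr_close(tag: str = "") -> None:
#         print(f"{tag}]")

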
class TestPrAct:
    """Test cases for pr_act function"""

    def test_pr_act_default(self, capsys: CaptureFixture[str]):
        """Test pr_act with default dot"""
        pr_act()
        captured = capsys.readouterr()
        assert captured.out == "."
        assert not captured.out.endswith("\n")

    def test_pr_act_custom_character(self, capsys: CaptureFixture[str]):
        """Test pr_act with custom character"""
        pr_act("#")
        captured = capsys.readouterr()
        assert captured.out == "#"

    def test_pr_act_multiple_calls(self, capsys: CaptureFixture[str]):
        """Test pr_act with multiple calls"""
        pr_act(".")
        pr_act(".")
        pr_act(".")
        captured = capsys.readouterr()
        assert captured.out == "..."

    def test_pr_act_various_characters(self, capsys: CaptureFixture[str]):
        """Test pr_act with various characters"""
        characters = [".", "#", "*", "+", "-", "=", ">", "~"]
        for char in characters:
            pr_act(char)
        captured = capsys.readouterr()
        assert "".join(characters) in captured.out

    def test_pr_act_empty_string(self, capsys: CaptureFixture[str]):
        """Test pr_act with empty string"""
        pr_act("")
        captured = capsys.readouterr()
        assert captured.out == ""

    def test_pr_act_special_character(self, capsys: CaptureFixture[str]):
        """Test pr_act with special characters"""
        pr_act("✓")
        captured = capsys.readouterr()
        assert captured.out == "✓"

    def test_pr_act_unicode(self, capsys: CaptureFixture[str]):
        """Test pr_act with unicode character"""
        pr_act("●")
        captured = capsys.readouterr()
        assert captured.out == "●"

    def test_pr_act_no_newline(self, capsys: CaptureFixture[str]):
        """Test pr_act doesn't add newline"""
        pr_act("x")
        captured = capsys.readouterr()
        assert not captured.out.endswith("\n")

    def test_pr_act_multiple_characters(self, capsys: CaptureFixture[str]):
        """Test pr_act with multiple characters in string"""
        pr_act("...")
        captured = capsys.readouterr()
        assert captured.out == "..."

    def test_pr_act_whitespace(self, capsys: CaptureFixture[str]):
        """Test pr_act with whitespace"""
        pr_act(" ")
        captured = capsys.readouterr()
        assert captured.out == " "


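# --- Reference sketch (assumption, not the shipped source) ---
# The exact-output assertions above leave pr_act only one real choice:
# print the string without a newline. flush=True is an extra assumption,
# typical for incremental progress output, and is not required by any test:
#
#     def pr_act(act_string: str = ".") -> None:
#         print(act_string, end="", flush=True)

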
class TestProgressCombinations:
    """Test combinations of progress printer functions"""

    def test_complete_progress_flow(self, capsys: CaptureFixture[str]):
        """Test complete progress output flow"""
        pr_header("PROCESS")
        pr_title("Task 1")
        pr_open("Subtask")
        pr_act(".")
        pr_act(".")
        pr_act(".")
        pr_close(" OK")
        captured = capsys.readouterr()

        assert "PROCESS" in captured.out
        assert "Task 1" in captured.out
        assert "Subtask" in captured.out
        assert "..." in captured.out
        assert " OK]" in captured.out

    def test_multiple_tasks_progress(self, capsys: CaptureFixture[str]):
        """Test multiple tasks with progress"""
        pr_header("BATCH PROCESS")
        for i in range(3):
            pr_open(f"Task {i + 1}")
            for _ in range(5):
                pr_act(".")
            pr_close(" DONE")
        captured = capsys.readouterr()

        assert "BATCH PROCESS" in captured.out
        assert "Task 1" in captured.out
        assert "Task 2" in captured.out
        assert "Task 3" in captured.out
        assert " DONE]" in captured.out

    def test_nested_progress(self, capsys: CaptureFixture[str]):
        """Test nested progress indicators"""
        pr_header("MAIN TASK", marker_string="=")
        pr_title("Subtask A", prefix_string=">")
        pr_open("Processing")
        pr_act("#")
        pr_act("#")
        pr_close()
        pr_title("Subtask B", prefix_string=">")
        pr_open("Processing")
        pr_act("*")
        pr_act("*")
        pr_close(" OK")
        captured = capsys.readouterr()

        assert "MAIN TASK" in captured.out
        assert "Subtask A" in captured.out
        assert "Subtask B" in captured.out
        assert "##" in captured.out
        assert "**" in captured.out

    def test_progress_with_different_markers(self, capsys: CaptureFixture[str]):
        """Test progress with different marker styles"""
        pr_header("Process", marker_string="*")
        pr_title("Step 1", prefix_string=">>", space_filler="-")
        pr_open("Work", prefix_string=">>", space_filler="-")
        pr_act("+")
        pr_close(" ✓")
        captured = capsys.readouterr()

        assert "*" in captured.out
        assert ">>" in captured.out
        assert "-" in captured.out
        assert "+" in captured.out
        assert "✓" in captured.out

    def test_empty_progress_sequence(self, capsys: CaptureFixture[str]):
        """Test progress sequence with no actual progress"""
        pr_open("Quick task")
        pr_close(" SKIP")
        captured = capsys.readouterr()

        assert "Quick task" in captured.out
        assert " SKIP]" in captured.out


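# --- Reference sketch (assumption, not the shipped source) ---
# The integration tests below combine the pr_* helpers with write_l. A
# minimal write_l consistent with the assertions: append a newline to the
# file handle when one is given, and mirror the line to the console only
# when print_line is True (the print_line default of False is inferred,
# not confirmed by the source):
#
#     def write_l(line: str, fpl=None, print_line: bool = False) -> None:
#         if fpl is not None:
#             fpl.write(line + "\n")  # raises ValueError on a closed handle
#         if print_line:
#             print(line)

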
class TestIntegration:
    """Integration tests combining multiple scenarios"""

    def test_file_and_console_logging(self, capsys: CaptureFixture[str]):
        """Test logging to both file and console"""
        fpl = io.StringIO()

        write_l("Starting process", fpl=fpl, print_line=True)
        write_l("Processing item 1", fpl=fpl, print_line=True)
        write_l("Processing item 2", fpl=fpl, print_line=True)
        write_l("Complete", fpl=fpl, print_line=True)

        captured = capsys.readouterr()
        file_content = fpl.getvalue()

        # Check console output
        assert "Starting process\n" in captured.out
        assert "Processing item 1\n" in captured.out
        assert "Processing item 2\n" in captured.out
        assert "Complete\n" in captured.out

        # Check file output
        assert "Starting process\n" in file_content
        assert "Processing item 1\n" in file_content
        assert "Processing item 2\n" in file_content
        assert "Complete\n" in file_content

        fpl.close()

    def test_progress_with_logging(self, capsys: CaptureFixture[str]):
        """Test combining progress output with file logging"""
        fpl = io.StringIO()

        write_l("=== Process Start ===", fpl=fpl, print_line=True)
        pr_header("MAIN PROCESS")
        write_l("Header shown", fpl=fpl, print_line=False)

        pr_open("Task 1")
        pr_act(".")
        pr_act(".")
        pr_close(" OK")
        write_l("Task 1 completed", fpl=fpl, print_line=False)

        write_l("=== Process End ===", fpl=fpl, print_line=True)

        captured = capsys.readouterr()
        file_content = fpl.getvalue()

        assert "=== Process Start ===" in captured.out
        assert "MAIN PROCESS" in captured.out
        assert "Task 1" in captured.out
        assert "=== Process End ===" in captured.out

        assert "=== Process Start ===\n" in file_content
        assert "Header shown\n" in file_content
        assert "Task 1 completed\n" in file_content
        assert "=== Process End ===\n" in file_content

        fpl.close()

    def test_complex_workflow(self, capsys: CaptureFixture[str]):
        """Test complex workflow with all functions"""
        fpl = io.StringIO()

        write_l("Log: Starting batch process", fpl=fpl, print_line=False)
        pr_header("BATCH PROCESSOR", marker_string="=", width=40)

        for i in range(2):
            write_l(f"Log: Processing batch {i + 1}", fpl=fpl, print_line=False)
            pr_title(f"Batch {i + 1}", prefix_string="|", space_filler=".")

            pr_open(f"Item {i + 1}", prefix_string="|", space_filler=".")
            for j in range(3):
                pr_act("*")
                write_l(f"Log: Progress {j + 1}/3", fpl=fpl, print_line=False)
            pr_close(" ✓")

            write_l(f"Log: Batch {i + 1} complete", fpl=fpl, print_line=False)

        write_l("Log: All batches complete", fpl=fpl, print_line=False)

        captured = capsys.readouterr()
        file_content = fpl.getvalue()

        # Check console has progress indicators
        assert "BATCH PROCESSOR" in captured.out
        assert "Batch 1" in captured.out
        assert "Batch 2" in captured.out
        assert "***" in captured.out
        assert "✓" in captured.out

        # Check file has all log entries
        assert "Log: Starting batch process\n" in file_content
        assert "Log: Processing batch 1\n" in file_content
        assert "Log: Processing batch 2\n" in file_content
        assert "Log: Progress 1/3\n" in file_content
        assert "Log: Batch 1 complete\n" in file_content
        assert "Log: All batches complete\n" in file_content

        fpl.close()


class TestEdgeCases:
    """Test edge cases and boundary conditions"""

    def test_write_l_none_file_handler(self, capsys: CaptureFixture[str]):
        """Test write_l explicitly with None file handler"""
        write_l("Test", fpl=None, print_line=True)
        captured = capsys.readouterr()
        assert captured.out == "Test\n"

    def test_pr_header_negative_width(self):
        """Test that pr_header with a negative width raises ValueError"""
        with pytest.raises(ValueError):
            pr_header("Test", width=-10)

    def test_pr_title_negative_width(self):
        """Test that pr_title with a negative width raises ValueError"""
        with pytest.raises(ValueError):
            pr_title("Test", width=-10)

    def test_pr_open_negative_width(self):
        """Test that pr_open with a negative width raises ValueError"""
        with pytest.raises(ValueError):
            pr_open("Test", width=-10)

    def test_multiple_pr_act_no_close(self, capsys: CaptureFixture[str]):
        """Test multiple pr_act calls without pr_close"""
        pr_act(".")
        pr_act(".")
        pr_act(".")
        captured = capsys.readouterr()
        assert captured.out == "..."

    def test_pr_close_without_pr_open(self, capsys: CaptureFixture[str]):
        """Test pr_close without a prior pr_open (should still work)"""
        pr_close(" OK")
        captured = capsys.readouterr()
        assert " OK]" in captured.out

    def test_very_long_strings(self):
        """Test with very long strings"""
        fpl = io.StringIO()
        long_str = "A" * 10000
        write_l(long_str, fpl=fpl, print_line=False)
        assert len(fpl.getvalue()) == 10001  # string + newline
        fpl.close()

    def test_pr_header_very_long_tag(self, capsys: CaptureFixture[str]):
        """Test pr_header with tag longer than width"""
        pr_header("This is a very long tag that exceeds the width", width=10)
        captured = capsys.readouterr()
        assert "This is a very long tag that exceeds the width" in captured.out

    def test_pr_title_very_long_tag(self, capsys: CaptureFixture[str]):
        """Test pr_title with tag longer than width"""
        pr_title("This is a very long tag that exceeds the width", width=10)
        captured = capsys.readouterr()
        assert "This is a very long tag that exceeds the width" in captured.out

    def test_write_l_closed_file(self):
        """Test that write_l with a closed file raises ValueError"""
        fpl = io.StringIO()
        fpl.close()

        with pytest.raises(ValueError):
            write_l("Test", fpl=fpl, print_line=False)


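# The parametrized cases below can be selected on their own with pytest's
# keyword filter, e.g. `pytest -k TestParametrized -v`.

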
class TestParametrized:
    """Parametrized tests for comprehensive coverage"""

    @pytest.mark.parametrize("print_line", [True, False])
    def test_write_l_print_line_variations(self, print_line: bool, capsys: CaptureFixture[str]):
        """Test write_l with different print_line values"""
        write_l("Test", print_line=print_line)
        captured = capsys.readouterr()
        if print_line:
            assert captured.out == "Test\n"
        else:
            assert captured.out == ""

    @pytest.mark.parametrize("marker", ["#", "*", "=", "-", "+", "~", "@", "^"])
    def test_pr_header_various_markers_param(self, marker: str, capsys: CaptureFixture[str]):
        """Test pr_header with various markers"""
        pr_header("TEST", marker_string=marker)
        captured = capsys.readouterr()
        assert marker in captured.out
        assert "TEST" in captured.out

    @pytest.mark.parametrize("width", [0, 5, 10, 20, 35, 50, 100])
    def test_pr_header_various_widths(self, width: int, capsys: CaptureFixture[str]):
        """Test pr_header with various widths"""
        pr_header("TEST", width=width)
        captured = capsys.readouterr()
        assert "TEST" in captured.out

    @pytest.mark.parametrize("filler", [".", "-", "_", "*", " ", "~", "="])
    def test_pr_title_various_fillers_param(self, filler: str, capsys: CaptureFixture[str]):
        """Test pr_title with various space fillers"""
        pr_title("Test", space_filler=filler)
        captured = capsys.readouterr()
        assert "Test" in captured.out

    @pytest.mark.parametrize("prefix", ["|", ">", ">>", "*", "-", "+"])
    def test_pr_title_various_prefixes(self, prefix: str, capsys: CaptureFixture[str]):
        """Test pr_title with various prefix strings"""
        pr_title("Test", prefix_string=prefix)
        captured = capsys.readouterr()
        assert prefix in captured.out
        assert "Test" in captured.out

    @pytest.mark.parametrize("act_char", [".", "#", "*", "+", "-", "=", ">", "~", "✓", "●"])
    def test_pr_act_various_characters_param(self, act_char: str, capsys: CaptureFixture[str]):
        """Test pr_act with various characters"""
        pr_act(act_char)
        captured = capsys.readouterr()
        assert captured.out == act_char

    @pytest.mark.parametrize("close_tag", ["", " OK", " DONE", " ✓", " ✗", " SKIP", " PASS"])
    def test_pr_close_various_tags_param(self, close_tag: str, capsys: CaptureFixture[str]):
        """Test pr_close with various tags"""
        pr_close(close_tag)
        captured = capsys.readouterr()
        assert f"{close_tag}]" in captured.out

    @pytest.mark.parametrize("content", [
        "Simple text",
        "Text with 特殊文字",
        "Text with emoji 🎉",
        "Text\twith\ttabs",
        "Multiple\n\nNewlines",
        "",
        "A" * 100,
    ])
    def test_write_l_various_content(self, content: str, capsys: CaptureFixture[str]):
        """Test write_l with various content types"""
        fpl = io.StringIO()
        write_l(content, fpl=fpl, print_line=True)
        captured = capsys.readouterr()
        assert content in captured.out
        assert content + "\n" in fpl.getvalue()
        fpl.close()


class TestRealWorldScenarios:
    """Test real-world usage scenarios"""

    def test_batch_processing_output(self, capsys: CaptureFixture[str]):
        """Test typical batch processing output"""
        pr_header("BATCH PROCESSOR", marker_string="=", width=50)

        items = ["file1.txt", "file2.txt", "file3.txt"]
        for item in items:
            pr_open(f"Processing {item}")
            for _ in range(10):
                pr_act(".")
            pr_close(" ✓")

        captured = capsys.readouterr()
        assert "BATCH PROCESSOR" in captured.out
        for item in items:
            assert item in captured.out
        assert "✓" in captured.out

    def test_logging_workflow(self, capsys: CaptureFixture[str]):
        """Test typical logging workflow"""
        log_file = io.StringIO()

        # Simulate a workflow with logging
        write_l("[INFO] Starting process", fpl=log_file, print_line=True)
        write_l("[INFO] Initializing components", fpl=log_file, print_line=True)
        write_l("[DEBUG] Component A loaded", fpl=log_file, print_line=False)
        write_l("[DEBUG] Component B loaded", fpl=log_file, print_line=False)
        write_l("[INFO] Processing data", fpl=log_file, print_line=True)
        write_l("[INFO] Process complete", fpl=log_file, print_line=True)

        captured = capsys.readouterr()
        log_content = log_file.getvalue()

        # Console should only have INFO messages
        assert "[INFO] Starting process" in captured.out
        assert "[DEBUG] Component A loaded" not in captured.out

        # Log file should have all messages
        assert "[INFO] Starting process\n" in log_content
        assert "[DEBUG] Component A loaded\n" in log_content
        assert "[DEBUG] Component B loaded\n" in log_content

        log_file.close()

    def test_progress_indicator_for_long_task(self, capsys: CaptureFixture[str]):
        """Test progress indicator for a long-running task"""
        pr_header("DATA PROCESSING")
        pr_open("Loading data", width=50)

        # Simulate progress
        for i in range(20):
            if i % 5 == 0:
                pr_act(str(i // 5))
            else:
                pr_act(".")

        pr_close(" COMPLETE")

        captured = capsys.readouterr()
        assert "DATA PROCESSING" in captured.out
        assert "Loading data" in captured.out
        assert "COMPLETE" in captured.out

    def test_multi_stage_process(self, capsys: CaptureFixture[str]):
        """Test multi-stage process with titles and progress"""
        pr_header("DEPLOYMENT PIPELINE", marker_string="=")

        stages = ["Build", "Test", "Deploy"]
        for stage in stages:
            pr_title(stage)
            pr_open(f"Running {stage.lower()}")
            pr_act("#")
            pr_act("#")
            pr_act("#")
            pr_close(" OK")

        captured = capsys.readouterr()
        assert "DEPLOYMENT PIPELINE" in captured.out
        for stage in stages:
            assert stage in captured.out
        assert "###" in captured.out

    def test_error_reporting_with_logging(self, capsys: CaptureFixture[str]):
        """Test error reporting workflow"""
        error_log = io.StringIO()

        pr_header("VALIDATION", marker_string="!")
        pr_open("Checking files")

        write_l("[ERROR] File not found: data.csv", fpl=error_log, print_line=False)
        pr_act("✗")

        write_l("[ERROR] Permission denied: output.txt", fpl=error_log, print_line=False)
        pr_act("✗")

        pr_close(" FAILED")

        captured = capsys.readouterr()
        log_content = error_log.getvalue()

        assert "VALIDATION" in captured.out
        assert "Checking files" in captured.out
        assert "✗✗" in captured.out
        assert "FAILED" in captured.out

        assert "[ERROR] File not found: data.csv\n" in log_content
        assert "[ERROR] Permission denied: output.txt\n" in log_content

        error_log.close()

    def test_detailed_reporting(self, capsys: CaptureFixture[str]):
        """Test detailed reporting with mixed output"""
        report_file = io.StringIO()

        pr_header("SYSTEM REPORT", marker_string="#", width=60)
        write_l("=== System Report Generated ===", fpl=report_file, print_line=False)

        pr_title("Database Status", prefix_string=">>")
        write_l("Database: Connected", fpl=report_file, print_line=False)
        write_l("Tables: 15", fpl=report_file, print_line=False)
        write_l("Records: 1,234,567", fpl=report_file, print_line=False)

        pr_title("API Status", prefix_string=">>")
        write_l("API: Online", fpl=report_file, print_line=False)
        write_l("Requests/min: 1,500", fpl=report_file, print_line=False)

        write_l("=== Report Complete ===", fpl=report_file, print_line=False)

        captured = capsys.readouterr()
        report_content = report_file.getvalue()

        assert "SYSTEM REPORT" in captured.out
        assert "Database Status" in captured.out
        assert "API Status" in captured.out

        assert "=== System Report Generated ===\n" in report_content
        assert "Database: Connected\n" in report_content
        assert "API: Online\n" in report_content
        assert "=== Report Complete ===\n" in report_content

        report_file.close()


# __END__

1249 tests/unit/email_handling/test_send_email.py (new file; diff suppressed because it is too large)
Some files were not shown because too many files have changed in this diff.