Compare commits

...

3 Commits

Author SHA1 Message Date
Thaddeus Hughes
1a22d3e9aa build 2025-10-15 15:13:50 -05:00
Thaddeus Hughes
150cd51cd5 download! 2025-10-15 14:35:27 -05:00
Thaddeus Hughes
7df55a9914 template.xlsx in dist folder 2025-10-09 09:00:43 -05:00
21 changed files with 14573 additions and 5819 deletions

View File

@@ -1 +1,2 @@
pyinstaller --noconsole --noconfirm -i icon.ico main.py
cp template.xlsx dist/main/template.xlsx

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -71,7 +71,7 @@
[],
False,
False,
1760017692,
1760558669,
[('runw.exe',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\PyInstaller\\bootloader\\Windows-64bit-intel\\runw.exe',
'EXECUTABLE')],

Binary file not shown.

View File

@@ -455,6 +455,36 @@
('calendar',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\calendar.py',
'PYMODULE'),
('certifi',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\certifi\\__init__.py',
'PYMODULE'),
('certifi.core',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\certifi\\core.py',
'PYMODULE'),
('charset_normalizer',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\charset_normalizer\\__init__.py',
'PYMODULE'),
('charset_normalizer.api',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\charset_normalizer\\api.py',
'PYMODULE'),
('charset_normalizer.cd',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\charset_normalizer\\cd.py',
'PYMODULE'),
('charset_normalizer.constant',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\charset_normalizer\\constant.py',
'PYMODULE'),
('charset_normalizer.legacy',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\charset_normalizer\\legacy.py',
'PYMODULE'),
('charset_normalizer.models',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\charset_normalizer\\models.py',
'PYMODULE'),
('charset_normalizer.utils',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\charset_normalizer\\utils.py',
'PYMODULE'),
('charset_normalizer.version',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\charset_normalizer\\version.py',
'PYMODULE'),
('code',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\code.py',
'PYMODULE'),
@@ -656,9 +686,30 @@
('http.cookiejar',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\http\\cookiejar.py',
'PYMODULE'),
('http.cookies',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\http\\cookies.py',
'PYMODULE'),
('http.server',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\http\\server.py',
'PYMODULE'),
('idna',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\idna\\__init__.py',
'PYMODULE'),
('idna.core',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\idna\\core.py',
'PYMODULE'),
('idna.idnadata',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\idna\\idnadata.py',
'PYMODULE'),
('idna.intranges',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\idna\\intranges.py',
'PYMODULE'),
('idna.package_data',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\idna\\package_data.py',
'PYMODULE'),
('idna.uts46data',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\idna\\uts46data.py',
'PYMODULE'),
('importlib',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\importlib\\__init__.py',
'PYMODULE'),
@@ -1449,6 +1500,57 @@
('random',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\random.py',
'PYMODULE'),
('requests',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\requests\\__init__.py',
'PYMODULE'),
('requests.__version__',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\requests\\__version__.py',
'PYMODULE'),
('requests._internal_utils',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\requests\\_internal_utils.py',
'PYMODULE'),
('requests.adapters',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\requests\\adapters.py',
'PYMODULE'),
('requests.api',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\requests\\api.py',
'PYMODULE'),
('requests.auth',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\requests\\auth.py',
'PYMODULE'),
('requests.certs',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\requests\\certs.py',
'PYMODULE'),
('requests.compat',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\requests\\compat.py',
'PYMODULE'),
('requests.cookies',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\requests\\cookies.py',
'PYMODULE'),
('requests.exceptions',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\requests\\exceptions.py',
'PYMODULE'),
('requests.hooks',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\requests\\hooks.py',
'PYMODULE'),
('requests.models',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\requests\\models.py',
'PYMODULE'),
('requests.packages',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\requests\\packages.py',
'PYMODULE'),
('requests.sessions',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\requests\\sessions.py',
'PYMODULE'),
('requests.status_codes',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\requests\\status_codes.py',
'PYMODULE'),
('requests.structures',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\requests\\structures.py',
'PYMODULE'),
('requests.utils',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\requests\\utils.py',
'PYMODULE'),
('rlcompleter',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\rlcompleter.py',
'PYMODULE'),
@@ -2068,6 +2170,114 @@
('urllib.response',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\urllib\\response.py',
'PYMODULE'),
('urllib3',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\__init__.py',
'PYMODULE'),
('urllib3._base_connection',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\_base_connection.py',
'PYMODULE'),
('urllib3._collections',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\_collections.py',
'PYMODULE'),
('urllib3._request_methods',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\_request_methods.py',
'PYMODULE'),
('urllib3._version',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\_version.py',
'PYMODULE'),
('urllib3.connection',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\connection.py',
'PYMODULE'),
('urllib3.connectionpool',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\connectionpool.py',
'PYMODULE'),
('urllib3.contrib',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\contrib\\__init__.py',
'PYMODULE'),
('urllib3.contrib.emscripten',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\contrib\\emscripten\\__init__.py',
'PYMODULE'),
('urllib3.contrib.emscripten.connection',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\contrib\\emscripten\\connection.py',
'PYMODULE'),
('urllib3.contrib.emscripten.fetch',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\contrib\\emscripten\\fetch.py',
'PYMODULE'),
('urllib3.contrib.emscripten.request',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\contrib\\emscripten\\request.py',
'PYMODULE'),
('urllib3.contrib.emscripten.response',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\contrib\\emscripten\\response.py',
'PYMODULE'),
('urllib3.contrib.pyopenssl',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\contrib\\pyopenssl.py',
'PYMODULE'),
('urllib3.contrib.socks',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\contrib\\socks.py',
'PYMODULE'),
('urllib3.exceptions',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\exceptions.py',
'PYMODULE'),
('urllib3.fields',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\fields.py',
'PYMODULE'),
('urllib3.filepost',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\filepost.py',
'PYMODULE'),
('urllib3.http2',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\http2\\__init__.py',
'PYMODULE'),
('urllib3.http2.connection',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\http2\\connection.py',
'PYMODULE'),
('urllib3.http2.probe',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\http2\\probe.py',
'PYMODULE'),
('urllib3.poolmanager',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\poolmanager.py',
'PYMODULE'),
('urllib3.response',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\response.py',
'PYMODULE'),
('urllib3.util',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\util\\__init__.py',
'PYMODULE'),
('urllib3.util.connection',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\util\\connection.py',
'PYMODULE'),
('urllib3.util.proxy',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\util\\proxy.py',
'PYMODULE'),
('urllib3.util.request',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\util\\request.py',
'PYMODULE'),
('urllib3.util.response',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\util\\response.py',
'PYMODULE'),
('urllib3.util.retry',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\util\\retry.py',
'PYMODULE'),
('urllib3.util.ssl_',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\util\\ssl_.py',
'PYMODULE'),
('urllib3.util.ssl_match_hostname',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\util\\ssl_match_hostname.py',
'PYMODULE'),
('urllib3.util.ssltransport',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\util\\ssltransport.py',
'PYMODULE'),
('urllib3.util.timeout',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\util\\timeout.py',
'PYMODULE'),
('urllib3.util.url',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\util\\url.py',
'PYMODULE'),
('urllib3.util.util',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\util\\util.py',
'PYMODULE'),
('urllib3.util.wait',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\urllib3\\util\\wait.py',
'PYMODULE'),
('webbrowser',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\webbrowser.py',
'PYMODULE'),

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -15,18 +15,18 @@ IMPORTANT: Do NOT post this list to the issue-tracker. Use it as a basis for
tracking down the missing module yourself. Thanks!
missing module named _posixshmem - imported by multiprocessing.resource_tracker (conditional), multiprocessing.shared_memory (conditional)
missing module named _frozen_importlib_external - imported by importlib._bootstrap (delayed), importlib (optional), importlib.abc (optional), zipimport (top-level)
excluded module named _frozen_importlib - imported by importlib (optional), importlib.abc (optional), zipimport (top-level)
missing module named 'collections.abc' - imported by traceback (top-level), typing (top-level), inspect (top-level), logging (top-level), importlib.resources.readers (top-level), selectors (top-level), tracemalloc (top-level), http.client (top-level), setuptools (top-level), setuptools._distutils.filelist (top-level), setuptools._distutils.util (top-level), setuptools._vendor.jaraco.functools (top-level), setuptools._vendor.more_itertools.more (top-level), setuptools._vendor.more_itertools.recipes (top-level), setuptools._distutils._modified (top-level), setuptools._distutils.compat (top-level), setuptools._distutils.spawn (top-level), setuptools._distutils.compilers.C.base (top-level), setuptools._distutils.fancy_getopt (top-level), setuptools._reqs (top-level), setuptools.discovery (top-level), setuptools.dist (top-level), setuptools._distutils.command.bdist (top-level), setuptools._distutils.core (top-level), setuptools._distutils.cmd (top-level), setuptools._distutils.dist (top-level), configparser (top-level), setuptools._distutils.extension (top-level), setuptools.config.setupcfg (top-level), setuptools.config.expand (top-level), setuptools.config.pyprojecttoml (top-level), setuptools.config._apply_pyprojecttoml (top-level), tomllib._parser (top-level), setuptools._vendor.tomli._parser (top-level), setuptools.command.egg_info (top-level), setuptools._distutils.command.build (top-level), setuptools._distutils.command.sdist (top-level), setuptools.glob (top-level), setuptools.command._requirestxt (top-level), setuptools.command.bdist_wheel (top-level), setuptools._vendor.wheel.cli.convert (top-level), setuptools._vendor.wheel.cli.tags (top-level), setuptools._vendor.typing_extensions (top-level), requests.compat (top-level), xml.etree.ElementTree (top-level), PIL.Image (top-level), PIL._typing (top-level), PIL.TiffImagePlugin (top-level), PIL.ImageOps (top-level), PIL.ImagePalette (top-level), PIL.ImageFilter (top-level), PIL.PngImagePlugin (top-level), 
PIL.Jpeg2KImagePlugin (top-level), PIL.IptcImagePlugin (top-level), setuptools._distutils.command.build_ext (top-level), _pyrepl.types (top-level), _pyrepl.readline (top-level), asyncio.base_events (top-level), asyncio.coroutines (top-level), setuptools._distutils.compilers.C.msvc (top-level)
missing module named pwd - imported by posixpath (delayed, conditional, optional), shutil (delayed, optional), tarfile (optional), pathlib._local (optional), subprocess (delayed, conditional, optional), setuptools._distutils.util (delayed, conditional, optional), netrc (delayed, conditional), getpass (delayed, optional), setuptools._vendor.backports.tarfile (optional), setuptools._distutils.archive_util (optional), http.server (delayed, optional)
missing module named grp - imported by shutil (delayed, optional), tarfile (optional), pathlib._local (optional), subprocess (delayed, conditional, optional), setuptools._vendor.backports.tarfile (optional), setuptools._distutils.archive_util (optional)
missing module named posix - imported by os (conditional, optional), posixpath (optional), shutil (conditional), importlib._bootstrap_external (conditional), _pyrepl.unix_console (delayed, optional)
missing module named resource - imported by posix (top-level)
missing module named _scproxy - imported by urllib.request (conditional)
missing module named termios - imported by getpass (optional), tty (top-level), _pyrepl.pager (delayed, optional), _pyrepl.unix_console (top-level), _pyrepl.fancy_termios (top-level), _pyrepl.unix_eventqueue (top-level)
missing module named pwd - imported by posixpath (delayed, conditional, optional), shutil (delayed, optional), tarfile (optional), pathlib._local (optional), subprocess (delayed, conditional, optional), setuptools._distutils.util (delayed, conditional, optional), netrc (delayed, conditional), getpass (delayed, optional), setuptools._vendor.backports.tarfile (optional), setuptools._distutils.archive_util (optional), http.server (delayed, optional)
missing module named 'collections.abc' - imported by traceback (top-level), typing (top-level), inspect (top-level), logging (top-level), importlib.resources.readers (top-level), selectors (top-level), tracemalloc (top-level), xml.etree.ElementTree (top-level), PIL.Image (top-level), PIL._typing (top-level), setuptools (top-level), setuptools._distutils.filelist (top-level), setuptools._distutils.util (top-level), setuptools._vendor.jaraco.functools (top-level), setuptools._vendor.more_itertools.more (top-level), setuptools._vendor.more_itertools.recipes (top-level), setuptools._distutils._modified (top-level), setuptools._distutils.compat (top-level), setuptools._distutils.spawn (top-level), setuptools._distutils.compilers.C.base (top-level), setuptools._distutils.fancy_getopt (top-level), setuptools._reqs (top-level), http.client (top-level), setuptools.discovery (top-level), setuptools.dist (top-level), setuptools._distutils.command.bdist (top-level), setuptools._distutils.core (top-level), setuptools._distutils.cmd (top-level), setuptools._distutils.dist (top-level), configparser (top-level), setuptools._distutils.extension (top-level), setuptools.config.setupcfg (top-level), setuptools.config.expand (top-level), setuptools.config.pyprojecttoml (top-level), setuptools.config._apply_pyprojecttoml (top-level), tomllib._parser (top-level), setuptools._vendor.tomli._parser (top-level), setuptools.command.egg_info (top-level), setuptools._distutils.command.build (top-level), setuptools._distutils.command.sdist (top-level), setuptools.glob (top-level), setuptools.command._requirestxt (top-level), setuptools.command.bdist_wheel (top-level), setuptools._vendor.wheel.cli.convert (top-level), setuptools._vendor.wheel.cli.tags (top-level), setuptools._vendor.typing_extensions (top-level), PIL.TiffImagePlugin (top-level), PIL.ImageOps (top-level), PIL.ImagePalette (top-level), PIL.ImageFilter (top-level), PIL.PngImagePlugin (top-level), PIL.Jpeg2KImagePlugin (top-level), 
PIL.IptcImagePlugin (top-level), setuptools._distutils.command.build_ext (top-level), _pyrepl.types (top-level), _pyrepl.readline (top-level), asyncio.base_events (top-level), asyncio.coroutines (top-level), setuptools._distutils.compilers.C.msvc (top-level)
missing module named multiprocessing.BufferTooShort - imported by multiprocessing (top-level), multiprocessing.connection (top-level)
missing module named multiprocessing.AuthenticationError - imported by multiprocessing (top-level), multiprocessing.connection (top-level)
missing module named _posixsubprocess - imported by subprocess (conditional), multiprocessing.util (delayed)
missing module named grp - imported by shutil (delayed, optional), tarfile (optional), pathlib._local (optional), subprocess (delayed, conditional, optional), setuptools._vendor.backports.tarfile (optional), setuptools._distutils.archive_util (optional)
missing module named posix - imported by posixpath (optional), shutil (conditional), importlib._bootstrap_external (conditional), os (conditional, optional), _pyrepl.unix_console (delayed, optional)
missing module named resource - imported by posix (top-level)
missing module named _frozen_importlib_external - imported by importlib._bootstrap (delayed), importlib (optional), importlib.abc (optional), zipimport (top-level)
excluded module named _frozen_importlib - imported by importlib (optional), importlib.abc (optional), zipimport (top-level)
missing module named multiprocessing.get_context - imported by multiprocessing (top-level), multiprocessing.pool (top-level), multiprocessing.managers (top-level), multiprocessing.sharedctypes (top-level)
missing module named multiprocessing.TimeoutError - imported by multiprocessing (top-level), multiprocessing.pool (top-level)
missing module named multiprocessing.set_start_method - imported by multiprocessing (top-level), multiprocessing.spawn (top-level)
@@ -57,6 +57,24 @@ missing module named 'lxml.etree' - imported by openpyxl.xml.functions (conditio
missing module named openpyxl.tests - imported by openpyxl.reader.excel (optional)
missing module named lxml - imported by openpyxl.xml (delayed, optional)
missing module named numpy - imported by openpyxl.compat.numbers (optional)
missing module named simplejson - imported by requests.compat (conditional, optional)
missing module named dummy_threading - imported by requests.cookies (optional)
missing module named zstandard - imported by urllib3.util.request (optional), urllib3.response (optional)
missing module named compression - imported by urllib3.util.request (optional), urllib3.response (optional)
missing module named 'h2.events' - imported by urllib3.http2.connection (top-level)
missing module named 'h2.connection' - imported by urllib3.http2.connection (top-level)
missing module named h2 - imported by urllib3.http2.connection (top-level)
missing module named brotli - imported by urllib3.util.request (optional), urllib3.response (optional)
missing module named brotlicffi - imported by urllib3.util.request (optional), urllib3.response (optional)
missing module named socks - imported by urllib3.contrib.socks (optional)
missing module named cryptography - imported by urllib3.contrib.pyopenssl (top-level), requests (conditional, optional)
missing module named 'OpenSSL.crypto' - imported by urllib3.contrib.pyopenssl (delayed, conditional)
missing module named 'cryptography.x509' - imported by urllib3.contrib.pyopenssl (delayed, optional)
missing module named OpenSSL - imported by urllib3.contrib.pyopenssl (top-level)
missing module named chardet - imported by requests (optional)
missing module named 'pyodide.ffi' - imported by urllib3.contrib.emscripten.fetch (delayed, optional)
missing module named pyodide - imported by urllib3.contrib.emscripten.fetch (top-level)
missing module named js - imported by urllib3.contrib.emscripten.fetch (top-level)
missing module named vms_lib - imported by platform (delayed, optional)
missing module named 'java.lang' - imported by platform (delayed, optional)
missing module named java - imported by platform (delayed)

File diff suppressed because it is too large Load Diff

Binary file not shown.

4800
dist/main/_internal/certifi/cacert.pem vendored Normal file

File diff suppressed because it is too large Load Diff

0
dist/main/_internal/certifi/py.typed vendored Normal file
View File

Binary file not shown.

BIN
dist/main/main.exe vendored

Binary file not shown.

BIN
dist/main/template.xlsx vendored Normal file

Binary file not shown.

415
downloader.py Normal file
View File

@@ -0,0 +1,415 @@
import requests
from typing import List, Optional, Union, Tuple
import re
class RSLoggerDownloader:
    """Client for downloading and decoding log data from an RS Logger device
    over its HTTP interface.

    The device serves its configuration at ``/logc.xml`` and its log memory
    page-by-page at ``/page.xml?Page=N``. Records are 4-byte binary tuples;
    a record with the high bit of byte 0 set is a date/hour stamp, otherwise
    it is one captured data byte with minute/second/millisecond fields.
    """

    def __init__(self, base_url: str):
        """
        Initialize downloader for RS Logger device
        Args:
            base_url: Base URL like "http://rslogger" or "http://192.168.1.100"
        """
        self.base_url = base_url.rstrip('/')
        self.session = requests.Session()

    def get_config(self) -> dict:
        """Get logger configuration parameters.

        Returns:
            dict with keys ``date_from``, ``date_to``, ``timestamp``,
            ``file_mode``, ``data_format``, ``timestamp_char``,
            ``time_format`` parsed from the device's '#'-separated record.
        Raises:
            requests.HTTPError: if the device responds with an error status.
        """
        url = f"{self.base_url}/logc.xml"
        response = self.session.get(url, timeout=5)
        response.raise_for_status()
        # The device returns one '#'-separated record of seven fields.
        parts = response.text.split('#')
        config = {
            'date_from': parts[0],
            'date_to': parts[1],
            'timestamp': int(parts[2]),
            'file_mode': int(parts[3]),
            'data_format': int(parts[4]),
            'timestamp_char': chr(int(parts[5])),
            'time_format': int(parts[6])
        }
        return config

    def _get_progress(self, data: bytearray) -> int:
        """Extract progress percentage from end of data.

        The device appends the progress value as the last payload byte,
        followed by one or more CR/LF pairs.
        """
        # Reuse the shared CRLF-stripping logic instead of duplicating it.
        length = self._clear_endofline(data)
        if length > 0:
            return data[length - 1]
        return 0

    def _clear_endofline(self, data: bytearray) -> int:
        """Remove trailing carriage returns and get data length.

        Returns the logical length of ``data`` once trailing CR (13) /
        LF (10) pairs are discounted; ``data`` itself is not modified.
        """
        length = len(data)
        while length >= 2 and data[length - 1] == 10 and data[length - 2] == 13:
            length -= 2
        return length

    def decode_control_characters(self, data: bytes) -> str:
        """
        Decode the <13>, <10>, etc. control character sequences back to actual characters.
        Args:
            data: Raw bytes with <NN> sequences
        Returns:
            String with control characters properly decoded
        """
        # Convert bytes to string; undecodable bytes become U+FFFD.
        text = data.decode('ascii', errors='replace')

        # Replace <NN> patterns with the character they encode.
        def replace_control(match):
            code = int(match.group(1))
            return chr(code)

        decoded = re.sub(r'<(\d+)>', replace_control, text)
        return decoded

    def parse_log_data(self, chunks: List[bytearray], config: dict, channel: int = 3) -> bytearray:
        """
        Parse raw log data chunks into readable format
        Args:
            chunks: List of raw data chunks
            config: Configuration dictionary from get_config()
            channel: Channel to parse (1=A, 2=B, 3=both)
        Returns:
            Parsed data as bytearray
        """
        result = bytearray()
        # Parser state carried across chunks: last channel/data byte seen,
        # last emitted per-channel times, and the current date/hour.
        state = {
            'last_ch': 0,
            'last_char': 0,
            'day_stamp': True,
            'last_ta': 0,
            'last_tb': 0,
            'h': 0,
            'd': 0,
            'm': 0,
            'y': 0
        }
        timestamp = config['timestamp']
        data_format = config['data_format']
        ts_char = config['timestamp_char']
        time_format = config['time_format']
        # Decode the timestamp setting into an interval, mirroring the
        # device's encoding: >=50000 and >1000 carry an offset interval;
        # values above 2 otherwise collapse to mode 2.
        t_interval = 0
        if timestamp >= 50000:
            t_interval = timestamp - 50000
        elif timestamp > 1000:
            t_interval = timestamp - 1000
        elif timestamp > 2:
            timestamp = 2
        for chunk in chunks:
            length = self._clear_endofline(chunk)
            if length <= 0:
                continue
            # The last payload byte of each chunk is a progress indicator,
            # not log data — drop it before parsing.
            length -= 1
            if length <= 0:
                continue
            parsed = self._parse_chunk(
                chunk, length, channel, timestamp, t_interval,
                data_format, ts_char, time_format, state
            )
            result.extend(parsed)
        return result

    def _parse_chunk(self, data: bytearray, length: int, channel: int,
                     timestamp: int, t_interval: int, data_format: int,
                     ts_char: str, time_format: int, state: dict) -> bytearray:
        """Parse a single chunk of 4-byte records into formatted output."""
        result = bytearray()
        index = 0
        while index < length:
            # Records are exactly 4 bytes; ignore a trailing partial record.
            if (length - index) < 4:
                break
            byte0 = data[index]
            byte1 = data[index + 1]
            byte2 = data[index + 2]
            byte3 = data[index + 3]
            # High bit of byte 0 marks a date/hour stamp record.
            if byte0 & 0x80:
                state['h'] = byte0 & 0x7F
                dtmp = byte1
                mtmp = byte2
                ytmp = byte3 + 2000
                # Only emit a date header when the date actually changed.
                if state['d'] != dtmp or state['m'] != mtmp or state['y'] != ytmp:
                    state['d'] = dtmp
                    state['m'] = mtmp
                    state['y'] = ytmp
                    if timestamp != 0:
                        if len(result) > 0:
                            result.extend(b'\r\n')
                        date_str = f"[{ytmp}-{mtmp:02d}-{dtmp:02d}]"
                        result.extend(date_str.encode('ascii'))
                    state['last_ta'] = 0
                    state['last_tb'] = 0
                    state['last_ch'] = 0
                    state['day_stamp'] = True
            else:
                # Data record: bit 6 selects channel B, low 6 bits are the
                # minute; byte1 is the captured data byte; bytes 2-3 pack
                # milliseconds (10 bits) and seconds (6 bits).
                ch = 'B' if (byte0 & 0x40) else 'A'
                ch_mask = 2 if ch == 'B' else 1
                minute = byte0 & 0x3F
                data_byte = byte1
                tu16 = byte2 | (byte3 << 8)
                ms = tu16 & 0x3FF
                s = (byte3 >> 2) & 0x3F
                if (channel & ch_mask):
                    # Build the time string in 24h or AM/PM form.
                    if time_format == 0:
                        h_str = f"{state['h']:2d}"
                    else:
                        if state['h'] == 0:
                            h_str = "A12"
                        elif state['h'] < 12:
                            h_str = f"A{state['h']:2d}"
                        elif state['h'] == 12:
                            h_str = "P12"
                        else:
                            h_str = f"P{state['h']-12:2d}"
                    time_str = f"{h_str}:{minute:02d}:{s:02d}.{ms:03d}"
                    # When both channels are shown, tag each line with its
                    # channel letter.
                    if channel == 3:
                        time_str += ch
                    time_str += ts_char
                    add_time = self._should_add_timestamp(
                        timestamp, t_interval, ch_mask, s, minute, state['h'],
                        state, channel
                    )
                    if add_time:
                        result.extend(b'\r\n')
                        result.extend(time_str.encode('ascii'))
                    if data_format == 1:
                        # Hex output: separator char followed by the byte in hex.
                        result.extend(ts_char.encode('ascii'))
                        result.extend(f"{data_byte:x}".encode('ascii'))
                    else:
                        # ASCII output: control bytes are escaped as <NN>.
                        if data_byte < 32:
                            result.extend(f"<{data_byte}>".encode('ascii'))
                        else:
                            result.append(data_byte)
                    state['day_stamp'] = False
                    state['last_ch'] = ch_mask
                    state['last_char'] = data_byte
            index += 4
        return result

    def _should_add_timestamp(self, timestamp: int, t_interval: int,
                              ch_mask: int, s: int, minute: int, h: int,
                              state: dict, channel: int) -> bool:
        """Determine if a timestamp should precede the next data byte.

        Modes (from the device config): 1 = every byte; >=50000 = after a
        specific trigger byte; 2 / interval = on channel change or when the
        configured interval has elapsed. Updates per-channel last-emit times
        in ``state`` when a timestamp is emitted.
        """
        if timestamp == 1:
            return True
        if timestamp >= 50000:
            if timestamp < 50256:
                # Trigger-byte mode: stamp after seeing the configured byte.
                if t_interval == state['last_char']:
                    return True
        elif timestamp == 2 or t_interval > 0:
            t = s + 60 * minute + 3600 * h
            should_add = False
            if state['last_ch'] != ch_mask:
                should_add = True
            # Interval mode on a single channel ignores channel switches.
            if t_interval > 0 and channel != 3:
                should_add = False
            if t_interval > 0:
                t_last = state['last_ta'] if ch_mask == 1 else state['last_tb']
                if (t - t_last) > t_interval:
                    should_add = True
            if should_add:
                if channel == 3:
                    state['last_ta'] = t
                    state['last_tb'] = t
                elif ch_mask == 1:
                    state['last_ta'] = t
                else:
                    state['last_tb'] = t
                return True
        return False

    def download_via_pages(self, output_file: Optional[str] = None, decode_controls: bool = True) -> Union[bytes, str, Tuple]:
        """
        Download data by fetching pages (like the preview does).
        This bypasses the date range issue entirely.
        Args:
            output_file: Optional filename to save to
            decode_controls: If True, decode <13>, <10> etc. to actual control characters
        Returns:
            Parsed log data as bytes or string (if decoded); when the device
            is in separate-files mode (file_mode == 2), a (channel_A,
            channel_B) tuple is returned instead.
        """
        print("Getting configuration...")
        config = self.get_config()
        print(f"Config: {config}")
        all_data = bytearray()
        # Page=0 rewinds the device to the first page.
        print("\nFetching first page...")
        url = f"{self.base_url}/page.xml?Page=0"
        response = self.session.get(url, timeout=10)
        response.raise_for_status()
        first_page = bytearray(response.content)
        print(f"First page: {len(first_page)} bytes")
        if len(first_page) > 4:
            all_data.extend(first_page)
        # Page=1 returns the last page; we use it as an end sentinel.
        print("Fetching last page...")
        url = f"{self.base_url}/page.xml?Page=1"
        response = self.session.get(url, timeout=10)
        response.raise_for_status()
        last_page = bytearray(response.content)
        print(f"Last page: {len(last_page)} bytes")
        # Page=3 advances to the next page; stop when the data repeats.
        print("\nFetching all pages...")
        current_page = first_page
        page_count = 1
        max_pages = 1000  # Safety limit against a device that never repeats
        while page_count < max_pages:
            url = f"{self.base_url}/page.xml?Page=3"  # 3 = next page
            response = self.session.get(url, timeout=10)
            response.raise_for_status()
            next_page = bytearray(response.content)
            # End of log: the device re-serves the same or the last page.
            if next_page == current_page or next_page == last_page:
                print(f"Reached end after {page_count} pages")
                break
            if len(next_page) > 4:
                all_data.extend(next_page)
            page_count += 1
            if page_count % 10 == 0:
                print(f"Fetched {page_count} pages, {len(all_data)} bytes total...")
            current_page = next_page
        print(f"\nTotal raw data collected: {len(all_data)} bytes from {page_count} pages")
        print("Parsing data...")
        file_mode = config['file_mode']
        # Treat all_data as a single chunk
        chunks = [all_data]
        if file_mode == 2:
            # Separate files for channel A and B
            data_a = self.parse_log_data(chunks, config, channel=1)
            data_b = self.parse_log_data(chunks, config, channel=2)
            if decode_controls:
                data_a_decoded = self.decode_control_characters(data_a)
                data_b_decoded = self.decode_control_characters(data_b)
                if output_file:
                    with open(f"{output_file}_A.txt", 'w', encoding='utf-8') as f:
                        f.write(data_a_decoded)
                    with open(f"{output_file}_B.txt", 'w', encoding='utf-8') as f:
                        f.write(data_b_decoded)
                    print(f"\nSaved decoded data to {output_file}_A.txt and {output_file}_B.txt")
                return data_a_decoded, data_b_decoded
            else:
                if output_file:
                    with open(f"{output_file}_A.txt", 'wb') as f:
                        f.write(data_a)
                    with open(f"{output_file}_B.txt", 'wb') as f:
                        f.write(data_b)
                    print(f"\nSaved raw data to {output_file}_A.txt and {output_file}_B.txt")
                return data_a, data_b
        else:
            # Combined file
            data = self.parse_log_data(chunks, config, channel=3)
            if decode_controls:
                data_decoded = self.decode_control_characters(data)
                if output_file:
                    with open(f"{output_file}.txt", 'w', encoding='utf-8') as f:
                        f.write(data_decoded)
                    print(f"\nSaved {len(data_decoded)} characters to {output_file}.txt")
                return data_decoded
            else:
                if output_file:
                    with open(f"{output_file}.txt", 'wb') as f:
                        f.write(data)
                    print(f"\nSaved {len(data)} bytes to {output_file}.txt")
                return data
# Usage example
if __name__ == "__main__":
    downloader = RSLoggerDownloader("http://rslogger")
    print("Downloading via page-by-page method...")
    print("="*60 + "\n")
    data = downloader.download_via_pages(
        output_file="rslogger_data",
        decode_controls=True  # This will decode <13>, <10> etc.
    )
    if data and len(data) > 0:
        print("\n" + "="*60)
        print("Download complete!")
        print("="*60)
        # In separate-files mode (file_mode == 2) download_via_pages returns
        # a (channel_A, channel_B) tuple; slicing the tuple itself would
        # print both full streams, so preview each channel individually.
        if isinstance(data, tuple):
            for label, stream in zip(("A", "B"), data):
                print(f"\nChannel {label}, first 1000 characters:")
                print(stream[:1000])
        else:
            print("\nFirst 1000 characters:")
            print(data[:1000])
    else:
        print("\nNo data was downloaded.")

694
main.py
View File

@@ -5,14 +5,408 @@ from tkinter import filedialog, messagebox
import subprocess
import os
from datetime import datetime
import shutil
import platform
import requests
from typing import List, Union
try:
from openpyxl import load_workbook
XLSX_AVAILABLE = True
except ImportError:
XLSX_AVAILABLE = False
# RSLogger Downloader Classes
class RSLoggerDownloader:
    """Client for downloading and decoding logs from an RS Logger device."""

    def __init__(self, base_url: str):
        """
        Initialize downloader for RS Logger device.

        Args:
            base_url: Base URL like "http://rslogger" or "http://192.168.1.100"
        """
        # One Session so HTTP connections are reused across page fetches.
        self.session = requests.Session()
        # Normalize: store the URL without a trailing slash.
        self.base_url = base_url.rstrip('/')
def get_config(self) -> dict:
    """Fetch and parse the logger's configuration (logc.xml).

    Returns:
        dict with date range, timestamp mode, file mode, data format,
        timestamp separator character and time format.
    """
    response = self.session.get(f"{self.base_url}/logc.xml", timeout=5)
    response.raise_for_status()
    # The device returns a '#'-separated field list.
    fields = response.text.split('#')
    return {
        'date_from': fields[0],
        'date_to': fields[1],
        'timestamp': int(fields[2]),
        'file_mode': int(fields[3]),
        'data_format': int(fields[4]),
        'timestamp_char': chr(int(fields[5])),
        'time_format': int(fields[6]),
    }
def _get_progress(self, data: bytearray) -> int:
"""Extract progress percentage from end of data"""
length = len(data)
while length >= 2 and data[length-1] == 10 and data[length-2] == 13:
length -= 2
if length > 0:
return data[length - 1]
return 0
def _clear_endofline(self, data: bytearray) -> int:
"""Remove trailing carriage returns and get data length"""
length = len(data)
while length >= 2 and data[length-1] == 10 and data[length-2] == 13:
length -= 2
return length
def decode_control_characters(self, data: bytes) -> str:
    """Convert ``<NN>`` escape sequences back into real control characters.

    The logger renders unprintable bytes as ``<13>``, ``<10>`` etc.; this
    reverses that encoding.

    Args:
        data: Raw bytes containing ``<NN>`` sequences.

    Returns:
        Decoded text with actual control characters restored.
    """
    text = data.decode('ascii', errors='replace')
    # Each <digits> group becomes the character with that code point.
    return re.sub(r'<(\d+)>', lambda m: chr(int(m.group(1))), text)
def parse_log_data(self, chunks: List[bytearray], config: dict, channel: int = 3) -> bytearray:
"""
Parse raw log data chunks into readable format
Args:
chunks: List of raw data chunks
config: Configuration dictionary from get_config()
channel: Channel to parse (1=A, 2=B, 3=both)
Returns:
Parsed data as bytearray
"""
result = bytearray()
# State variables for parsing
state = {
'last_ch': 0,
'last_char': 0,
'day_stamp': True,
'last_ta': 0,
'last_tb': 0,
'h': 0,
'd': 0,
'm': 0,
'y': 0
}
timestamp = config['timestamp']
data_format = config['data_format']
ts_char = config['timestamp_char']
time_format = config['time_format']
# Determine time interval
t_interval = 0
if timestamp >= 50000:
t_interval = timestamp - 50000
elif timestamp > 1000:
t_interval = timestamp - 1000
elif timestamp > 2:
timestamp = 2
for chunk_idx, chunk in enumerate(chunks):
length = self._clear_endofline(chunk)
if length <= 0:
continue
# Remove progress byte at end
length -= 1
if length <= 0:
continue
parsed = self._parse_chunk(
chunk, length, channel, timestamp, t_interval,
data_format, ts_char, time_format, state
)
result.extend(parsed)
return result
def _parse_chunk(self, data: bytearray, length: int, channel: int,
timestamp: int, t_interval: int, data_format: int,
ts_char: str, time_format: int, state: dict) -> bytearray:
"""Parse a single chunk of data"""
result = bytearray()
index = 0
while index < length:
if (length - index) < 4:
break
byte0 = data[index]
byte1 = data[index + 1]
byte2 = data[index + 2]
byte3 = data[index + 3]
# Check if this is a timestamp marker (high bit set)
if byte0 & 0x80:
# Date/time record
state['h'] = byte0 & 0x7F
dtmp = byte1
mtmp = byte2
ytmp = byte3 + 2000
if state['d'] != dtmp or state['m'] != mtmp or state['y'] != ytmp:
state['d'] = dtmp
state['m'] = mtmp
state['y'] = ytmp
if timestamp != 0:
if len(result) > 0:
result.extend(b'\r\n')
date_str = f"[{ytmp}-{mtmp:02d}-{dtmp:02d}]"
result.extend(date_str.encode('ascii'))
state['last_ta'] = 0
state['last_tb'] = 0
state['last_ch'] = 0
state['day_stamp'] = True
else:
# Data record
ch = 'B' if (byte0 & 0x40) else 'A'
ch_mask = 2 if ch == 'B' else 1
minute = byte0 & 0x3F
data_byte = byte1
tu16 = byte2 | (byte3 << 8)
ms = tu16 & 0x3FF
s = (byte3 >> 2) & 0x3F
if (channel & ch_mask):
# Format time string
if time_format == 0:
h_str = f"{state['h']:2d}"
else:
if state['h'] == 0:
h_str = "A12"
elif state['h'] < 12:
h_str = f"A{state['h']:2d}"
elif state['h'] == 12:
h_str = "P12"
else:
h_str = f"P{state['h']-12:2d}"
time_str = f"{h_str}:{minute:02d}:{s:02d}.{ms:03d}"
if channel == 3:
time_str += ch
time_str += ts_char
# Decide if we need to add timestamp based on config
add_time = self._should_add_timestamp(
timestamp, t_interval, ch_mask, s, minute, state['h'],
state, channel
)
if add_time:
result.extend(b'\r\n')
result.extend(time_str.encode('ascii'))
# Add the data byte
if data_format == 1:
# Hex format
result.extend(ts_char.encode('ascii'))
result.extend(f"{data_byte:x}".encode('ascii'))
else:
# ASCII format
if data_byte < 32:
result.extend(f"<{data_byte}>".encode('ascii'))
else:
result.append(data_byte)
state['day_stamp'] = False
state['last_ch'] = ch_mask
state['last_char'] = data_byte
index += 4
return result
def _should_add_timestamp(self, timestamp: int, t_interval: int,
ch_mask: int, s: int, minute: int, h: int,
state: dict, channel: int) -> bool:
"""Determine if timestamp should be added based on configuration"""
if timestamp == 1:
return True
if timestamp >= 50000:
if timestamp < 50256:
if t_interval == state['last_char']:
return True
elif timestamp == 2 or t_interval > 0:
t = s + 60 * minute + 3600 * h
should_add = False
if state['last_ch'] != ch_mask:
should_add = True
if t_interval > 0 and channel != 3:
should_add = False
if t_interval > 0:
t_last = state['last_ta'] if ch_mask == 1 else state['last_tb']
if (t - t_last) > t_interval:
should_add = True
if should_add:
if channel == 3:
state['last_ta'] = t
state['last_tb'] = t
elif ch_mask == 1:
state['last_ta'] = t
else:
state['last_tb'] = t
return True
return False
def download_via_pages(self, output_file: str = None, decode_controls: bool = True) -> Union[bytes, str, tuple]:
    """
    Download data by fetching pages (like the preview does).
    This bypasses the date range issue entirely.

    Args:
        output_file: Optional base filename (an extension is appended
            per output file) to save to.
        decode_controls: If True, decode <13>, <10> etc. to actual
            control characters and return/save text instead of bytes.

    Returns:
        Combined mode (file_mode != 2): parsed data as str when decoded,
        otherwise raw bytes. Split mode (file_mode == 2): a
        (channel_A, channel_B) tuple of str or bytes.
        (The previous annotation of Union[bytes, str] omitted the
        split-mode tuple; callers should handle both shapes.)
    """
    print("Getting configuration...")
    config = self.get_config()
    print(f"Config: {config}")
    all_data = bytearray()
    # Get first page
    print("\nFetching first page...")
    url = f"{self.base_url}/page.xml?Page=0"
    response = self.session.get(url, timeout=10)
    response.raise_for_status()
    first_page = bytearray(response.content)
    print(f"First page: {len(first_page)} bytes")
    if len(first_page) > 4:
        all_data.extend(first_page)
    # Get last page to see total size
    print("Fetching last page...")
    url = f"{self.base_url}/page.xml?Page=1"
    response = self.session.get(url, timeout=10)
    response.raise_for_status()
    last_page = bytearray(response.content)
    print(f"Last page: {len(last_page)} bytes")
    # Now keep getting next pages until we get back to the last page
    print("\nFetching all pages...")
    current_page = first_page
    page_count = 1
    max_pages = 1000  # Safety limit against endless paging
    while page_count < max_pages:
        url = f"{self.base_url}/page.xml?Page=3"  # 3 = next page
        response = self.session.get(url, timeout=10)
        response.raise_for_status()
        next_page = bytearray(response.content)
        # Check if we've reached the end (device re-serves a page)
        if next_page == current_page or next_page == last_page:
            print(f"Reached end after {page_count} pages")
            break
        if len(next_page) > 4:
            all_data.extend(next_page)
        page_count += 1
        if page_count % 10 == 0:
            print(f"Fetched {page_count} pages, {len(all_data)} bytes total...")
        current_page = next_page
    print(f"\nTotal raw data collected: {len(all_data)} bytes from {page_count} pages")
    # Parse the data
    print("Parsing data...")
    file_mode = config['file_mode']
    # Treat all_data as a single chunk
    chunks = [all_data]
    if file_mode == 2:
        # Separate files for channel A and B
        data_a = self.parse_log_data(chunks, config, channel=1)
        data_b = self.parse_log_data(chunks, config, channel=2)
        # Decode control characters if requested
        if decode_controls:
            data_a_decoded = self.decode_control_characters(data_a)
            data_b_decoded = self.decode_control_characters(data_b)
            if output_file:
                with open(f"{output_file}_A.txt", 'w', encoding='utf-8') as f:
                    f.write(data_a_decoded)
                with open(f"{output_file}_B.txt", 'w', encoding='utf-8') as f:
                    f.write(data_b_decoded)
                print(f"\nSaved decoded data to {output_file}_A.txt and {output_file}_B.txt")
            return data_a_decoded, data_b_decoded
        else:
            if output_file:
                with open(f"{output_file}_A.txt", 'wb') as f:
                    f.write(data_a)
                with open(f"{output_file}_B.txt", 'wb') as f:
                    f.write(data_b)
                print(f"\nSaved raw data to {output_file}_A.txt and {output_file}_B.txt")
            return data_a, data_b
    else:
        # Combined file
        data = self.parse_log_data(chunks, config, channel=3)
        # Decode control characters if requested
        if decode_controls:
            data_decoded = self.decode_control_characters(data)
            if output_file:
                with open(f"{output_file}.txt", 'w', encoding='utf-8') as f:
                    f.write(data_decoded)
                print(f"\nSaved {len(data_decoded)} characters to {output_file}.txt")
            return data_decoded
        else:
            if output_file:
                with open(f"{output_file}.txt", 'wb') as f:
                    f.write(data)
                print(f"\nSaved {len(data)} bytes to {output_file}.txt")
            return data
# Log parsing and conversion functions
def parse_logs(log_text):
lines = log_text.splitlines()
if lines and '=~' in lines[0]:
@@ -39,7 +433,7 @@ def parse_logs(log_text):
try:
dt = datetime.strptime(ts_line, '%I:%M %p %m/%d/%y')
excel_ts = dt.strftime('%m/%d/%Y %I:%M %p')
pairs.append((weight, units, excel_ts))
pairs.append((weight, units, excel_ts, dt))
print((weight, units, excel_ts))
except ValueError:
i += 1
@@ -52,35 +446,143 @@ def parse_logs(log_text):
return pairs
def remove_duplicates(pairs):
    """Group duplicate readings (weight within 20 and time within 60 s).

    Consecutive entries whose weight is within 20 units of the group's
    first entry and whose timestamp is within one minute of it are
    collapsed into a single group.

    Args:
        pairs: List of (weight, units, excel_ts, dt) tuples in log order.

    Returns:
        List of tuples; each tuple holds the (weight, units, excel_ts)
        triples of one duplicate group.
    """
    groups = []
    total = len(pairs)
    index = 0
    while index < total:
        base_weight, base_units, base_ts, base_dt = pairs[index]
        current = [(base_weight, base_units, base_ts)]
        probe = index + 1
        while probe < total:
            weight, units, ts, dt = pairs[probe]
            # Duplicate test is always against the group's FIRST entry.
            if abs(base_weight - weight) <= 20 and abs((base_dt - dt).total_seconds()) <= 60:
                current.append((weight, units, ts))
                probe += 1
            else:
                break
        groups.append(tuple(current))
        # Jump past any consumed duplicates, else advance by one.
        index = probe if probe > index + 1 else index + 1
    return groups
def write_csv1(pairs, filename):
    """Write sequential CSV with duplicate readings pushed to the right.

    Each row holds one duplicate group; groups with several readings get
    extra WEIGHT_n/UNITS_n/TIME_n columns.

    Args:
        pairs: List of duplicate groups from remove_duplicates(), each a
            tuple of (weight, units, timestamp) triples.
        filename: Destination CSV path.
    """
    with open(filename, 'w', newline='') as f:
        writer = csv.writer(f)
        # Column count is driven by the largest duplicate group.
        # default=1 guards empty input, where max() would raise ValueError.
        max_dups = max((len(group) for group in pairs), default=1)
        # Create headers: WEIGHT, UNITS, TIME repeated for each duplicate
        headers = []
        for i in range(max_dups):
            suffix = f"_{i+1}" if i > 0 else ""
            headers.extend([f'WEIGHT{suffix}', f'UNITS{suffix}', f'TIME{suffix}'])
        writer.writerow(headers)
        # Write data rows, padding short groups to a uniform width.
        for group in pairs:
            row = []
            for weight, units, timestamp in group:
                row.extend([weight, units, timestamp])
            while len(row) < max_dups * 3:
                row.append('')
            writer.writerow(row)
def write_csv2(pairs, filename):
    """Write joined gross/tare CSV with duplicates pushed to the right.

    Groups are consumed in pairs: even index = gross reading group, odd
    index = tare reading group. NET columns hold gross - tare when the
    units agree, or a mismatch marker otherwise.

    Args:
        pairs: List of duplicate groups from remove_duplicates(), each a
            tuple of (weight, units, timestamp) triples.
        filename: Destination CSV path.
    """
    with open(filename, 'w', newline='') as f:
        writer = csv.writer(f)
        # default=1 guards empty input, where max() would raise ValueError.
        max_dups = max((len(group) for group in pairs), default=1)
        # Header layout: all GROSS columns, then TARE, then NET.
        headers = []
        for i in range(max_dups):
            suffix = f"_{i+1}" if i > 0 else ""
            headers.extend([f'GROSS_WT{suffix}', f'GROSS_UNITS{suffix}', f'GROSS_T{suffix}'])
        for i in range(max_dups):
            suffix = f"_{i+1}" if i > 0 else ""
            headers.extend([f'TARE_WT{suffix}', f'TARE_UNITS{suffix}', f'TARE_T{suffix}'])
        for i in range(max_dups):
            suffix = f"_{i+1}" if i > 0 else ""
            headers.extend([f'NET_WT{suffix}', f'NET_UNITS{suffix}'])
        writer.writerow(headers)
        # Process groups as (gross, tare) pairs.
        for j in range(0, len(pairs), 2):
            row = []
            gross_group = pairs[j]
            for weight, units, timestamp in gross_group:
                row.extend([weight, units, timestamp])
            # Pad gross to max_dups columns.
            while len(row) < max_dups * 3:
                row.append('')
            if j + 1 < len(pairs):
                tare_group = pairs[j + 1]
                for weight, units, timestamp in tare_group:
                    row.extend([weight, units, timestamp])
                # Pad tare to max_dups columns.
                while len(row) < max_dups * 6:
                    row.append('')
                # NET per duplicate position, when both sides have one.
                for k in range(max_dups):
                    if k < len(gross_group) and k < len(tare_group):
                        gross_weight, gross_units = gross_group[k][0], gross_group[k][1]
                        tare_weight, tare_units = tare_group[k][0], tare_group[k][1]
                        if gross_units == tare_units:
                            row.extend([gross_weight - tare_weight, tare_units])
                        else:
                            row.extend(['UNIT MISMATCH', 'MISMATCH'])
                    else:
                        row.extend(['', ''])
            else:
                # Odd trailing gross group: blank tare and net columns.
                while len(row) < max_dups * 6:
                    row.append('')
                for k in range(max_dups):
                    row.extend(['', ''])
            writer.writerow(row)
def write_xlsx(pairs, output_filename, template_path='template.xlsx'):
"""Write sequential data to XLSX file using template"""
"""Write sequential data to XLSX file using template with duplicates pushed to the right"""
if not XLSX_AVAILABLE:
raise ImportError("openpyxl is required for XLSX export. Install with: pip install openpyxl")
@@ -102,11 +604,26 @@ def write_xlsx(pairs, output_filename, template_path='template.xlsx'):
if ws.max_row > 1:
ws.delete_rows(2, ws.max_row - 1)
# Write new data starting from row 2
for row_idx, (weight, units, timestamp) in enumerate(pairs, start=2):
ws.cell(row=row_idx, column=1, value=weight)
ws.cell(row=row_idx, column=2, value=units)
ws.cell(row=row_idx, column=3, value=timestamp)
# Find max number of duplicates
max_dups = max(len(group) for group in pairs) if pairs else 1
# Update headers if needed (row 1)
col = 1
for i in range(max_dups):
suffix = f"_{i+1}" if i > 0 else ""
ws.cell(row=1, column=col, value=f'WEIGHT{suffix}')
ws.cell(row=1, column=col+1, value=f'UNITS{suffix}')
ws.cell(row=1, column=col+2, value=f'TIME{suffix}')
col += 3
# Write data starting from row 2
for row_idx, group in enumerate(pairs, start=2):
col = 1
for weight, units, timestamp in group:
ws.cell(row=row_idx, column=col, value=weight)
ws.cell(row=row_idx, column=col+1, value=units)
ws.cell(row=row_idx, column=col+2, value=timestamp)
col += 3
# Set COMBINED as the active sheet (default sheet when opened)
if 'COMBINED' in wb.sheetnames:
@@ -149,17 +666,15 @@ def run_update_script():
except Exception as e:
messagebox.showerror("Error", f"Failed to run update.sh: {str(e)}")
def select_input_file():
filename = filedialog.askopenfilename(filetypes=[("Text files", "*.txt"), ("All files", "*.*")])
if filename:
input_file_var.set(filename)
def process_files():
input_file = input_file_var.get()
def convert_local_file():
"""Convert a local file selected by the user"""
input_file = filedialog.askopenfilename(
title="Select Log File to Convert",
filetypes=[("Text files", "*.txt"), ("All files", "*.*")]
)
if not input_file:
messagebox.showerror("Error", "Please select an input file.")
return
return # User cancelled
generate_csv = csv_var.get()
generate_xlsx = xlsx_var.get()
@@ -182,6 +697,9 @@ def process_files():
log_text = f.read()
pairs = parse_logs(log_text)
# Remove duplicates
pairs = remove_duplicates(pairs)
# Generate CSV files if checked
if generate_csv:
try:
@@ -222,20 +740,104 @@ def process_files():
except Exception as e:
messagebox.showerror("Error", f"An error occurred: {str(e)}")
root = tk.Tk()
root.title("Log to CSV Converter")
root.geometry("500x200")
def download_and_convert():
    """Download log data from the RS Logger device and convert it.

    Prompts for a destination file, downloads via HTTP, then writes the
    selected CSV/XLSX outputs next to the saved text file.
    """
    # Ask where the downloaded text file should be stored.
    destination = filedialog.asksaveasfilename(
        title="Save Downloaded Log As",
        defaultextension=".txt",
        filetypes=[("Text files", "*.txt"), ("All files", "*.*")]
    )
    if not destination:
        return  # user cancelled the save dialog
    want_csv = csv_var.get()
    want_xlsx = xlsx_var.get()
    if not want_csv and not want_xlsx:
        messagebox.showerror("Error", "Please select at least one output format (CSV or XLSX).")
        return
    try:
        # Download from the RS Logger device.
        messagebox.showinfo("Downloading", "Connecting to RS Logger at http://rslogger\nThis may take a moment...")
        downloader = RSLoggerDownloader("http://rslogger")
        base = os.path.splitext(destination)[0]
        log_text = downloader.download_via_pages(
            output_file=base,
            decode_controls=True
        )
        if not log_text:
            messagebox.showerror("Error", "No data was downloaded from RS Logger")
            return
        # Derive output paths next to the downloaded text file.
        sequential_csv = f"{base}.sequential.csv"
        joined_csv = f"{base}.joined.csv"
        xlsx_path = f"{base}.xlsx"
        generated_files = [f"{base}.txt"]  # the downloaded txt file itself
        error_messages = []
        # Parse, then collapse duplicate readings into groups.
        pairs = remove_duplicates(parse_logs(log_text))
        if want_csv:
            try:
                write_csv1(pairs, sequential_csv)
                generated_files.append(sequential_csv)
                write_csv2(pairs, joined_csv)
                generated_files.append(joined_csv)
            except Exception as e:
                error_messages.append(f"CSV export failed: {str(e)}")
        if want_xlsx:
            try:
                write_xlsx(pairs, xlsx_path)
                generated_files.append(xlsx_path)
            except ImportError as e:
                error_messages.append(f"XLSX export skipped: {str(e)}")
            except Exception as e:
                error_messages.append(f"XLSX export failed: {str(e)}")
        if generated_files:
            files_list = "\n".join(generated_files)
            error_info = ""
            if error_messages:
                error_info = "\n\nWarnings:\n" + "\n".join(error_messages)
            # askquestion doubles as a "Show the files" yes/no prompt.
            response = messagebox.askquestion("Success",
                f"Downloaded and converted!\n\nFiles generated:\n{files_list}{error_info}\n\nShow the files?",
                icon='info')
            if response == 'yes':
                show_files_in_explorer(generated_files)
        else:
            messagebox.showerror("Error", "No files were generated.\n\n" + "\n".join(error_messages))
    except Exception as e:
        messagebox.showerror("Error", f"Download failed: {str(e)}")
# GUI Setup
root = tk.Tk()
root.title("RS Logger Converter")
root.geometry("500x220")
input_file_var = tk.StringVar()
csv_var = tk.BooleanVar(value=False) # CSV unchecked by default
xlsx_var = tk.BooleanVar(value=True) # XLSX checked by default
# Input file selection
input_frame = tk.Frame(root)
input_frame.pack(pady=5, padx=10, fill='x')
tk.Label(input_frame, text="Select Input Log File:").pack(side='left')
tk.Entry(input_frame, textvariable=input_file_var, width=40).pack(side='left', padx=5)
tk.Button(input_frame, text="Browse Input", bg="#2196F3", command=select_input_file).pack(side='left')
# Title label
title_label = tk.Label(root, text="RS Logger Data Converter", font=("Arial", 16, "bold"))
title_label.pack(pady=10)
# Output format checkboxes
format_frame = tk.Frame(root)
@@ -244,10 +846,16 @@ tk.Label(format_frame, text="Output Formats:").pack(side='left')
tk.Checkbutton(format_frame, text="CSV", variable=csv_var).pack(side='left', padx=10)
tk.Checkbutton(format_frame, text="XLSX", variable=xlsx_var).pack(side='left', padx=10)
# Convert button
tk.Button(root, text="CONVERT", command=process_files, font=("Arial", 14), relief="raised", bg="#4CAF50", fg="white").pack(pady=10, padx=10, fill='x')
# Convert local file button
tk.Button(root, text="CONVERT LOCAL FILE", command=convert_local_file,
font=("Arial", 12), relief="raised", bg="#2196F3", fg="white").pack(pady=5, padx=10, fill='x')
# Download and convert button
tk.Button(root, text="DOWNLOAD FROM RS LOGGER", command=download_and_convert,
font=("Arial", 12), relief="raised", bg="#4CAF50", fg="white").pack(pady=5, padx=10, fill='x')
# Update script button
tk.Button(root, text="Update this Application", command=run_update_script, font=("Arial", 12), relief="raised").pack(pady=5, padx=10, fill='x')
tk.Button(root, text="Update this Application", command=run_update_script,
font=("Arial", 10), relief="raised").pack(pady=5, padx=10, fill='x')
root.mainloop()

Binary file not shown.