Compare commits

...

5 Commits

Author           SHA1        Message                                          Date
Thaddeus Hughes  1a22d3e9aa  build                                            2025-10-15 15:13:50 -05:00
Thaddeus Hughes  150cd51cd5  download!                                        2025-10-15 14:35:27 -05:00
Thaddeus Hughes  7df55a9914  template.xlsx in dist folder                     2025-10-09 09:00:43 -05:00
Thaddeus Hughes  2208b5da87  checkboxes for output and showing output files   2025-10-09 08:48:40 -05:00
Thaddeus Hughes  948f9363c7  xlsx output                                      2025-10-09 08:45:36 -05:00
50 changed files with 37332 additions and 5501 deletions


@@ -1,5 +0,0 @@
GROSS_WT,TARE_WT,NET_WT,GROSS_T,TARE_T,GROSS_UNITS,TARE_UNITS,NET_UNITS
0,3780,-3780,09/17/2025 06:12 PM,09/17/2025 06:13 PM,lb,lb,lb
3780,3640,140,09/17/2025 10:18 PM,09/17/2025 10:19 PM,lb,lb,lb
1640,0,1640,09/17/2025 10:19 PM,09/17/2025 10:20 PM,lb,lb,lb
1680,,,09/17/2025 10:19 PM,,lb,,


@@ -1,8 +0,0 @@
WEIGHT,UNITS,TIME
0,lb,09/17/2025 06:12 PM
3780,lb,09/17/2025 06:13 PM
3780,lb,09/17/2025 10:18 PM
3640,lb,09/17/2025 10:19 PM
1640,lb,09/17/2025 10:19 PM
0,lb,09/17/2025 10:20 PM
1680,lb,09/17/2025 10:19 PM
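For context, the two files deleted above are earlier outputs of the converter: the second is the raw weights log, and the first is the joined gross/tare view of the same readings, where NET_WT = GROSS_WT - TARE_WT (0 - 3780 = -3780, 3780 - 3640 = 140, 1640 - 0 = 1640, with the trailing unpaired 1680 left open). A minimal sketch of that pairing, assuming consecutive readings alternate gross then tare (join_weighings is a hypothetical helper, not from the repo):

def join_weighings(rows):
    # rows: (weight, units, time) tuples in log order
    joined = []
    for i in range(0, len(rows) - 1, 2):
        (gw, gu, gt), (tw, tu, tt) = rows[i], rows[i + 1]
        joined.append((gw, tw, gw - tw, gt, tt, gu, tu, gu))
    if len(rows) % 2:  # trailing gross with no tare, like the 1680 row
        gw, gu, gt = rows[-1]
        joined.append((gw, '', '', gt, '', gu, '', ''))
    return joined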


@@ -1 +1,2 @@
pyinstaller --noconsole --noconfirm -i icon.ico main.py
cp template.xlsx dist/main/template.xlsx
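The second line is the new one in this range: the build now copies template.xlsx next to the executable in dist/main/. A minimal sketch of resolving that path at runtime, assuming the app looks next to the executable in a frozen PyInstaller onedir build (illustrative only; main.py may do this differently):

import os
import sys

def template_path():
    # PyInstaller sets sys.frozen; in a onedir build sys.executable
    # lives in dist/main/, where the build script put template.xlsx.
    if getattr(sys, 'frozen', False):
        return os.path.join(os.path.dirname(sys.executable), 'template.xlsx')
    # During development, fall back to the source tree.
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'template.xlsx')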

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -55,6 +55,15 @@
('pyi_rth_inspect',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\PyInstaller\\hooks\\rthooks\\pyi_rth_inspect.py',
'PYSOURCE'),
('pyi_rth_setuptools',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\PyInstaller\\hooks\\rthooks\\pyi_rth_setuptools.py',
'PYSOURCE'),
('pyi_rth_pkgutil',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\PyInstaller\\hooks\\rthooks\\pyi_rth_pkgutil.py',
'PYSOURCE'),
('pyi_rth_multiprocessing',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\PyInstaller\\hooks\\rthooks\\pyi_rth_multiprocessing.py',
'PYSOURCE'),
('pyi_rth__tkinter',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\PyInstaller\\hooks\\rthooks\\pyi_rth__tkinter.py',
'PYSOURCE'),
@@ -62,7 +71,7 @@
[],
False,
False,
1760558669,  # was 1760015187,
[('runw.exe',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\PyInstaller\\bootloader\\Windows-64bit-intel\\runw.exe',
'EXECUTABLE')],
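Apart from the three newly added runtime hooks, the only change in this hunk is the build timestamp, a Unix epoch. Converting it (a quick standalone check, not from the repo) ties it to the "build" commit above:

from datetime import datetime, timezone

print(datetime.fromtimestamp(1760558669, tz=timezone.utc))
# 2025-10-15 20:04:29+00:00, i.e. 15:04 -05:00 — nine minutes before
# the 15:13:50 -05:00 "build" commit was recorded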


@@ -33,6 +33,15 @@
('pyi_rth_inspect',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\PyInstaller\\hooks\\rthooks\\pyi_rth_inspect.py',
'PYSOURCE'),
('pyi_rth_setuptools',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\PyInstaller\\hooks\\rthooks\\pyi_rth_setuptools.py',
'PYSOURCE'),
('pyi_rth_pkgutil',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\PyInstaller\\hooks\\rthooks\\pyi_rth_pkgutil.py',
'PYSOURCE'),
('pyi_rth_multiprocessing',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\PyInstaller\\hooks\\rthooks\\pyi_rth_multiprocessing.py',
'PYSOURCE'),
('pyi_rth__tkinter',
'C:\\Users\\Thad\\AppData\\Local\\Programs\\Python\\Python313\\Lib\\site-packages\\PyInstaller\\hooks\\rthooks\\pyi_rth__tkinter.py',
'PYSOURCE'),

Binary file not shown.

File diff suppressed because it is too large

Binary file not shown.

Binary file not shown.

Binary file not shown.


@@ -14,12 +14,67 @@ Types if import:
IMPORTANT: Do NOT post this list to the issue-tracker. Use it as a basis for
tracking down the missing module yourself. Thanks!
missing module named _posixshmem - imported by multiprocessing.resource_tracker (conditional), multiprocessing.shared_memory (conditional)
missing module named _frozen_importlib_external - imported by importlib._bootstrap (delayed), importlib (optional), importlib.abc (optional), zipimport (top-level)
excluded module named _frozen_importlib - imported by importlib (optional), importlib.abc (optional), zipimport (top-level)
missing module named 'collections.abc' - imported by traceback (top-level), typing (top-level), inspect (top-level), logging (top-level), importlib.resources.readers (top-level), selectors (top-level), tracemalloc (top-level), http.client (top-level), setuptools (top-level), setuptools._distutils.filelist (top-level), setuptools._distutils.util (top-level), setuptools._vendor.jaraco.functools (top-level), setuptools._vendor.more_itertools.more (top-level), setuptools._vendor.more_itertools.recipes (top-level), setuptools._distutils._modified (top-level), setuptools._distutils.compat (top-level), setuptools._distutils.spawn (top-level), setuptools._distutils.compilers.C.base (top-level), setuptools._distutils.fancy_getopt (top-level), setuptools._reqs (top-level), setuptools.discovery (top-level), setuptools.dist (top-level), setuptools._distutils.command.bdist (top-level), setuptools._distutils.core (top-level), setuptools._distutils.cmd (top-level), setuptools._distutils.dist (top-level), configparser (top-level), setuptools._distutils.extension (top-level), setuptools.config.setupcfg (top-level), setuptools.config.expand (top-level), setuptools.config.pyprojecttoml (top-level), setuptools.config._apply_pyprojecttoml (top-level), tomllib._parser (top-level), setuptools._vendor.tomli._parser (top-level), setuptools.command.egg_info (top-level), setuptools._distutils.command.build (top-level), setuptools._distutils.command.sdist (top-level), setuptools.glob (top-level), setuptools.command._requirestxt (top-level), setuptools.command.bdist_wheel (top-level), setuptools._vendor.wheel.cli.convert (top-level), setuptools._vendor.wheel.cli.tags (top-level), setuptools._vendor.typing_extensions (top-level), requests.compat (top-level), xml.etree.ElementTree (top-level), PIL.Image (top-level), PIL._typing (top-level), PIL.TiffImagePlugin (top-level), PIL.ImageOps (top-level), PIL.ImagePalette (top-level), PIL.ImageFilter (top-level), PIL.PngImagePlugin (top-level), PIL.Jpeg2KImagePlugin (top-level), PIL.IptcImagePlugin (top-level), setuptools._distutils.command.build_ext (top-level), _pyrepl.types (top-level), _pyrepl.readline (top-level), asyncio.base_events (top-level), asyncio.coroutines (top-level), setuptools._distutils.compilers.C.msvc (top-level)
missing module named pwd - imported by posixpath (delayed, conditional, optional), shutil (delayed, optional), tarfile (optional), pathlib._local (optional), subprocess (delayed, conditional, optional), setuptools._distutils.util (delayed, conditional, optional), netrc (delayed, conditional), getpass (delayed, optional), setuptools._vendor.backports.tarfile (optional), setuptools._distutils.archive_util (optional), http.server (delayed, optional)
missing module named grp - imported by shutil (delayed, optional), tarfile (optional), pathlib._local (optional), subprocess (delayed, conditional, optional), setuptools._vendor.backports.tarfile (optional), setuptools._distutils.archive_util (optional)
missing module named posix - imported by os (conditional, optional), posixpath (optional), shutil (conditional), importlib._bootstrap_external (conditional), _pyrepl.unix_console (delayed, optional)
missing module named resource - imported by posix (top-level)
missing module named _scproxy - imported by urllib.request (conditional)
missing module named termios - imported by getpass (optional), tty (top-level), _pyrepl.pager (delayed, optional), _pyrepl.unix_console (top-level), _pyrepl.fancy_termios (top-level), _pyrepl.unix_eventqueue (top-level)
missing module named multiprocessing.BufferTooShort - imported by multiprocessing (top-level), multiprocessing.connection (top-level)
missing module named multiprocessing.AuthenticationError - imported by multiprocessing (top-level), multiprocessing.connection (top-level)
missing module named _posixsubprocess - imported by subprocess (conditional), multiprocessing.util (delayed)
missing module named multiprocessing.get_context - imported by multiprocessing (top-level), multiprocessing.pool (top-level), multiprocessing.managers (top-level), multiprocessing.sharedctypes (top-level)
missing module named multiprocessing.TimeoutError - imported by multiprocessing (top-level), multiprocessing.pool (top-level)
missing module named multiprocessing.set_start_method - imported by multiprocessing (top-level), multiprocessing.spawn (top-level)
missing module named multiprocessing.get_start_method - imported by multiprocessing (top-level), multiprocessing.spawn (top-level)
missing module named pyimod02_importers - imported by C:\Users\Thad\AppData\Local\Programs\Python\Python313\Lib\site-packages\PyInstaller\hooks\rthooks\pyi_rth_pkgutil.py (delayed)
missing module named typing_extensions.Buffer - imported by setuptools._vendor.typing_extensions (top-level), setuptools._vendor.wheel.wheelfile (conditional)
missing module named typing_extensions.Literal - imported by setuptools._vendor.typing_extensions (top-level), setuptools.config._validate_pyproject.formats (conditional)
missing module named typing_extensions.Self - imported by setuptools._vendor.typing_extensions (top-level), setuptools.config.expand (conditional), setuptools.config.pyprojecttoml (conditional), setuptools.config._validate_pyproject.error_reporting (conditional)
missing module named typing_extensions.deprecated - imported by setuptools._vendor.typing_extensions (top-level), setuptools._distutils.sysconfig (conditional), setuptools._distutils.command.bdist (conditional)
missing module named typing_extensions.TypeAlias - imported by setuptools._vendor.typing_extensions (top-level), setuptools._distutils.compilers.C.base (conditional), setuptools._reqs (conditional), setuptools.warnings (conditional), setuptools._path (conditional), setuptools._distutils.dist (conditional), setuptools.config.setupcfg (conditional), setuptools.config._apply_pyprojecttoml (conditional), setuptools.dist (conditional), setuptools.command.bdist_egg (conditional), setuptools.compat.py311 (conditional)
missing module named typing_extensions.Unpack - imported by setuptools._vendor.typing_extensions (top-level), setuptools._distutils.util (conditional), setuptools._distutils.compilers.C.base (conditional), setuptools._distutils.cmd (conditional)
missing module named typing_extensions.TypeVarTuple - imported by setuptools._vendor.typing_extensions (top-level), setuptools._distutils.util (conditional), setuptools._distutils.compilers.C.base (conditional), setuptools._distutils.cmd (conditional)
missing module named asyncio.DefaultEventLoopPolicy - imported by asyncio (delayed, conditional), asyncio.events (delayed, conditional)
missing module named usercustomize - imported by site (delayed, optional)
missing module named sitecustomize - imported by site (delayed, optional)
missing module named _curses - imported by curses (top-level), curses.has_key (top-level), _pyrepl.curses (optional)
missing module named fcntl - imported by subprocess (optional), _pyrepl.unix_console (top-level)
missing module named readline - imported by site (delayed, optional), rlcompleter (optional), code (delayed, conditional, optional)
missing module named _typeshed - imported by setuptools._distutils.dist (conditional), setuptools.glob (conditional), setuptools.compat.py311 (conditional)
missing module named _manylinux - imported by packaging._manylinux (delayed, optional), setuptools._vendor.packaging._manylinux (delayed, optional), setuptools._vendor.wheel.vendored.packaging._manylinux (delayed, optional)
missing module named importlib_resources - imported by setuptools._vendor.jaraco.text (optional)
missing module named trove_classifiers - imported by setuptools.config._validate_pyproject.formats (optional)
missing module named olefile - imported by PIL.FpxImagePlugin (top-level), PIL.MicImagePlugin (top-level)
missing module named 'numpy.typing' - imported by PIL._typing (conditional, optional)
missing module named defusedxml - imported by openpyxl.xml (delayed, optional), PIL.Image (optional)
missing module named 'defusedxml.ElementTree' - imported by openpyxl.xml.functions (conditional)
missing module named 'lxml.etree' - imported by openpyxl.xml.functions (conditional)
missing module named openpyxl.tests - imported by openpyxl.reader.excel (optional)
missing module named lxml - imported by openpyxl.xml (delayed, optional)
missing module named numpy - imported by openpyxl.compat.numbers (optional)
missing module named simplejson - imported by requests.compat (conditional, optional)
missing module named dummy_threading - imported by requests.cookies (optional)
missing module named zstandard - imported by urllib3.util.request (optional), urllib3.response (optional)
missing module named compression - imported by urllib3.util.request (optional), urllib3.response (optional)
missing module named 'h2.events' - imported by urllib3.http2.connection (top-level)
missing module named 'h2.connection' - imported by urllib3.http2.connection (top-level)
missing module named h2 - imported by urllib3.http2.connection (top-level)
missing module named brotli - imported by urllib3.util.request (optional), urllib3.response (optional)
missing module named brotlicffi - imported by urllib3.util.request (optional), urllib3.response (optional)
missing module named socks - imported by urllib3.contrib.socks (optional)
missing module named cryptography - imported by urllib3.contrib.pyopenssl (top-level), requests (conditional, optional)
missing module named 'OpenSSL.crypto' - imported by urllib3.contrib.pyopenssl (delayed, conditional)
missing module named 'cryptography.x509' - imported by urllib3.contrib.pyopenssl (delayed, optional)
missing module named OpenSSL - imported by urllib3.contrib.pyopenssl (top-level)
missing module named chardet - imported by requests (optional)
missing module named 'pyodide.ffi' - imported by urllib3.contrib.emscripten.fetch (delayed, optional)
missing module named pyodide - imported by urllib3.contrib.emscripten.fetch (top-level)
missing module named js - imported by urllib3.contrib.emscripten.fetch (top-level)
missing module named vms_lib - imported by platform (delayed, optional)
missing module named 'java.lang' - imported by platform (delayed, optional)
missing module named java - imported by platform (delayed)

File diff suppressed because it is too large

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

BIN dist/main/_internal/VCRUNTIME140_1.dll vendored Normal file

Binary file not shown.

BIN dist/main/_internal/_asyncio.pyd vendored Normal file

Binary file not shown.

BIN dist/main/_internal/_ctypes.pyd vendored Normal file

Binary file not shown.

BIN dist/main/_internal/_elementtree.pyd vendored Normal file

Binary file not shown.

BIN dist/main/_internal/_multiprocessing.pyd vendored Normal file

Binary file not shown.

BIN dist/main/_internal/_overlapped.pyd vendored Normal file

Binary file not shown.

BIN dist/main/_internal/_queue.pyd vendored Normal file

Binary file not shown.

BIN dist/main/_internal/_ssl.pyd vendored Normal file

Binary file not shown.

BIN dist/main/_internal/_wmi.pyd vendored Normal file

Binary file not shown.

Binary file not shown.

4800 dist/main/_internal/certifi/cacert.pem vendored Normal file

File diff suppressed because it is too large

0 dist/main/_internal/certifi/py.typed vendored Normal file

Binary file not shown.

BIN dist/main/_internal/libffi-8.dll vendored Normal file

Binary file not shown.

BIN dist/main/_internal/libssl-3.dll vendored Normal file

Binary file not shown.

BIN dist/main/_internal/pyexpat.pyd vendored Normal file

Binary file not shown.


@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -0,0 +1,129 @@
Metadata-Version: 2.1
Name: importlib_metadata
Version: 8.0.0
Summary: Read metadata from Python packages
Author-email: "Jason R. Coombs" <jaraco@jaraco.com>
Project-URL: Source, https://github.com/python/importlib_metadata
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3 :: Only
Requires-Python: >=3.8
Description-Content-Type: text/x-rst
License-File: LICENSE
Requires-Dist: zipp >=0.5
Requires-Dist: typing-extensions >=3.6.4 ; python_version < "3.8"
Provides-Extra: doc
Requires-Dist: sphinx >=3.5 ; extra == 'doc'
Requires-Dist: jaraco.packaging >=9.3 ; extra == 'doc'
Requires-Dist: rst.linker >=1.9 ; extra == 'doc'
Requires-Dist: furo ; extra == 'doc'
Requires-Dist: sphinx-lint ; extra == 'doc'
Requires-Dist: jaraco.tidelift >=1.4 ; extra == 'doc'
Provides-Extra: perf
Requires-Dist: ipython ; extra == 'perf'
Provides-Extra: test
Requires-Dist: pytest !=8.1.*,>=6 ; extra == 'test'
Requires-Dist: pytest-checkdocs >=2.4 ; extra == 'test'
Requires-Dist: pytest-cov ; extra == 'test'
Requires-Dist: pytest-mypy ; extra == 'test'
Requires-Dist: pytest-enabler >=2.2 ; extra == 'test'
Requires-Dist: pytest-ruff >=0.2.1 ; extra == 'test'
Requires-Dist: packaging ; extra == 'test'
Requires-Dist: pyfakefs ; extra == 'test'
Requires-Dist: flufl.flake8 ; extra == 'test'
Requires-Dist: pytest-perf >=0.9.2 ; extra == 'test'
Requires-Dist: jaraco.test >=5.4 ; extra == 'test'
Requires-Dist: importlib-resources >=1.3 ; (python_version < "3.9") and extra == 'test'
.. image:: https://img.shields.io/pypi/v/importlib_metadata.svg
:target: https://pypi.org/project/importlib_metadata
.. image:: https://img.shields.io/pypi/pyversions/importlib_metadata.svg
.. image:: https://github.com/python/importlib_metadata/actions/workflows/main.yml/badge.svg
:target: https://github.com/python/importlib_metadata/actions?query=workflow%3A%22tests%22
:alt: tests
.. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/charliermarsh/ruff/main/assets/badge/v2.json
:target: https://github.com/astral-sh/ruff
:alt: Ruff
.. image:: https://readthedocs.org/projects/importlib-metadata/badge/?version=latest
:target: https://importlib-metadata.readthedocs.io/en/latest/?badge=latest
.. image:: https://img.shields.io/badge/skeleton-2024-informational
:target: https://blog.jaraco.com/skeleton
.. image:: https://tidelift.com/badges/package/pypi/importlib-metadata
:target: https://tidelift.com/subscription/pkg/pypi-importlib-metadata?utm_source=pypi-importlib-metadata&utm_medium=readme
Library to access the metadata for a Python package.
This package supplies third-party access to the functionality of
`importlib.metadata <https://docs.python.org/3/library/importlib.metadata.html>`_
including improvements added to subsequent Python versions.
Compatibility
=============
New features are introduced in this third-party library and later merged
into CPython. The following table indicates which versions of this library
were contributed to different versions in the standard library:
.. list-table::
:header-rows: 1
* - importlib_metadata
- stdlib
* - 7.0
- 3.13
* - 6.5
- 3.12
* - 4.13
- 3.11
* - 4.6
- 3.10
* - 1.4
- 3.8
Usage
=====
See the `online documentation <https://importlib-metadata.readthedocs.io/>`_
for usage details.
`Finder authors
<https://docs.python.org/3/reference/import.html#finders-and-loaders>`_ can
also add support for custom package installers. See the above documentation
for details.
Caveats
=======
This project primarily supports third-party packages installed by PyPA
tools (or other conforming packages). It does not support:
- Packages in the stdlib.
- Packages installed without metadata.
Project details
===============
* Project home: https://github.com/python/importlib_metadata
* Report bugs at: https://github.com/python/importlib_metadata/issues
* Code hosting: https://github.com/python/importlib_metadata
* Documentation: https://importlib-metadata.readthedocs.io/
For Enterprise
==============
Available as part of the Tidelift Subscription.
This project and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use.
`Learn more <https://tidelift.com/subscription/pkg/pypi-importlib-metadata?utm_source=pypi-importlib-metadata&utm_medium=referral&utm_campaign=github>`_.


@@ -0,0 +1,32 @@
importlib_metadata-8.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
importlib_metadata-8.0.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
importlib_metadata-8.0.0.dist-info/METADATA,sha256=anuQ7_7h4J1bSEzfcjIBakPi2cyVQ7y7jklLHsBeH1k,4648
importlib_metadata-8.0.0.dist-info/RECORD,,
importlib_metadata-8.0.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
importlib_metadata-8.0.0.dist-info/WHEEL,sha256=mguMlWGMX-VHnMpKOjjQidIo1ssRlCFu4a4mBpz1s2M,91
importlib_metadata-8.0.0.dist-info/top_level.txt,sha256=CO3fD9yylANiXkrMo4qHLV_mqXL2sC5JFKgt1yWAT-A,19
importlib_metadata/__init__.py,sha256=tZNB-23h8Bixi9uCrQqj9Yf0aeC--Josdy3IZRIQeB0,33798
importlib_metadata/__pycache__/__init__.cpython-312.pyc,,
importlib_metadata/__pycache__/_adapters.cpython-312.pyc,,
importlib_metadata/__pycache__/_collections.cpython-312.pyc,,
importlib_metadata/__pycache__/_compat.cpython-312.pyc,,
importlib_metadata/__pycache__/_functools.cpython-312.pyc,,
importlib_metadata/__pycache__/_itertools.cpython-312.pyc,,
importlib_metadata/__pycache__/_meta.cpython-312.pyc,,
importlib_metadata/__pycache__/_text.cpython-312.pyc,,
importlib_metadata/__pycache__/diagnose.cpython-312.pyc,,
importlib_metadata/_adapters.py,sha256=rIhWTwBvYA1bV7i-5FfVX38qEXDTXFeS5cb5xJtP3ks,2317
importlib_metadata/_collections.py,sha256=CJ0OTCHIjWA0ZIVS4voORAsn2R4R2cQBEtPsZEJpASY,743
importlib_metadata/_compat.py,sha256=73QKrN9KNoaZzhbX5yPCCZa-FaALwXe8TPlDR72JgBU,1314
importlib_metadata/_functools.py,sha256=PsY2-4rrKX4RVeRC1oGp1lB1pmC9eKN88_f-bD9uOoA,2895
importlib_metadata/_itertools.py,sha256=cvr_2v8BRbxcIl5x5ldfqdHjhI8Yi8s8yk50G_nm6jQ,2068
importlib_metadata/_meta.py,sha256=nxZ7C8GVlcBFAKWyVOn_dn7ot_twBcbm1NmvjIetBHI,1801
importlib_metadata/_text.py,sha256=HCsFksZpJLeTP3NEk_ngrAeXVRRtTrtyh9eOABoRP4A,2166
importlib_metadata/compat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
importlib_metadata/compat/__pycache__/__init__.cpython-312.pyc,,
importlib_metadata/compat/__pycache__/py311.cpython-312.pyc,,
importlib_metadata/compat/__pycache__/py39.cpython-312.pyc,,
importlib_metadata/compat/py311.py,sha256=uqm-K-uohyj1042TH4a9Er_I5o7667DvulcD-gC_fSA,608
importlib_metadata/compat/py39.py,sha256=cPkMv6-0ilK-0Jw_Tkn0xYbOKJZc4WJKQHow0c2T44w,1102
importlib_metadata/diagnose.py,sha256=nkSRMiowlmkhLYhKhvCg9glmt_11Cox-EmLzEbqYTa8,379
importlib_metadata/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
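Each RECORD entry above is "path,sha256=<urlsafe base64 digest, padding stripped>,size". The hash 47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU, repeated on every zero-size entry, is simply the SHA-256 of an empty file, which is easy to verify standalone:

import base64
import hashlib

digest = hashlib.sha256(b'').digest()  # contents of an empty file
print(base64.urlsafe_b64encode(digest).rstrip(b'=').decode())
# 47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU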


@@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: setuptools (70.1.1)
Root-Is-Purelib: true
Tag: py3-none-any


@@ -0,0 +1 @@
importlib_metadata


@@ -0,0 +1,2 @@
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. Nullam varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus magna felis sollicitudin mauris. Integer in mauris eu nibh euismod gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue, eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis, neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis, molestie eu, feugiat in, orci. In hac habitasse platea dictumst.

BIN dist/main/main.exe vendored

Binary file not shown.

BIN dist/main/template.xlsx vendored Normal file

Binary file not shown.

415 downloader.py Normal file

@@ -0,0 +1,415 @@
import requests
from typing import List, Optional, Union, Tuple
import re
class RSLoggerDownloader:
def __init__(self, base_url: str):
"""
Initialize downloader for RS Logger device
Args:
base_url: Base URL like "http://rslogger" or "http://192.168.1.100"
"""
self.base_url = base_url.rstrip('/')
self.session = requests.Session()
def get_config(self) -> dict:
"""Get logger configuration parameters"""
url = f"{self.base_url}/logc.xml"
response = self.session.get(url, timeout=5)
response.raise_for_status()
parts = response.text.split('#')
config = {
'date_from': parts[0],
'date_to': parts[1],
'timestamp': int(parts[2]),
'file_mode': int(parts[3]),
'data_format': int(parts[4]),
'timestamp_char': chr(int(parts[5])),
'time_format': int(parts[6])
}
return config
def _get_progress(self, data: bytearray) -> int:
"""Extract progress percentage from end of data"""
length = len(data)
while length >= 2 and data[length-1] == 10 and data[length-2] == 13:
length -= 2
if length > 0:
return data[length - 1]
return 0
def _clear_endofline(self, data: bytearray) -> int:
"""Remove trailing carriage returns and get data length"""
length = len(data)
while length >= 2 and data[length-1] == 10 and data[length-2] == 13:
length -= 2
return length
def decode_control_characters(self, data: bytes) -> str:
"""
Decode the <13>, <10>, etc. control character sequences back to actual characters.
Args:
data: Raw bytes with <NN> sequences
Returns:
String with control characters properly decoded
"""
# Convert bytes to string
text = data.decode('ascii', errors='replace')
# Replace <NN> patterns with actual characters
def replace_control(match):
code = int(match.group(1))
return chr(code)
# Match <digits> pattern and replace with the actual character
decoded = re.sub(r'<(\d+)>', replace_control, text)
return decoded
def parse_log_data(self, chunks: List[bytearray], config: dict, channel: int = 3) -> bytearray:
"""
Parse raw log data chunks into readable format
Args:
chunks: List of raw data chunks
config: Configuration dictionary from get_config()
channel: Channel to parse (1=A, 2=B, 3=both)
Returns:
Parsed data as bytearray
"""
result = bytearray()
# State variables for parsing
state = {
'last_ch': 0,
'last_char': 0,
'day_stamp': True,
'last_ta': 0,
'last_tb': 0,
'h': 0,
'd': 0,
'm': 0,
'y': 0
}
timestamp = config['timestamp']
data_format = config['data_format']
ts_char = config['timestamp_char']
time_format = config['time_format']
# Determine time interval
t_interval = 0
if timestamp >= 50000:
t_interval = timestamp - 50000
elif timestamp > 1000:
t_interval = timestamp - 1000
elif timestamp > 2:
timestamp = 2
for chunk_idx, chunk in enumerate(chunks):
length = self._clear_endofline(chunk)
if length <= 0:
continue
# Remove progress byte at end
length -= 1
if length <= 0:
continue
parsed = self._parse_chunk(
chunk, length, channel, timestamp, t_interval,
data_format, ts_char, time_format, state
)
result.extend(parsed)
return result
def _parse_chunk(self, data: bytearray, length: int, channel: int,
timestamp: int, t_interval: int, data_format: int,
ts_char: str, time_format: int, state: dict) -> bytearray:
"""Parse a single chunk of data"""
result = bytearray()
index = 0
while index < length:
if (length - index) < 4:
break
byte0 = data[index]
byte1 = data[index + 1]
byte2 = data[index + 2]
byte3 = data[index + 3]
# Check if this is a timestamp marker (high bit set)
if byte0 & 0x80:
# Date/time record
state['h'] = byte0 & 0x7F
dtmp = byte1
mtmp = byte2
ytmp = byte3 + 2000
if state['d'] != dtmp or state['m'] != mtmp or state['y'] != ytmp:
state['d'] = dtmp
state['m'] = mtmp
state['y'] = ytmp
if timestamp != 0:
if len(result) > 0:
result.extend(b'\r\n')
date_str = f"[{ytmp}-{mtmp:02d}-{dtmp:02d}]"
result.extend(date_str.encode('ascii'))
state['last_ta'] = 0
state['last_tb'] = 0
state['last_ch'] = 0
state['day_stamp'] = True
else:
# Data record
ch = 'B' if (byte0 & 0x40) else 'A'
ch_mask = 2 if ch == 'B' else 1
minute = byte0 & 0x3F
data_byte = byte1
tu16 = byte2 | (byte3 << 8)
ms = tu16 & 0x3FF
s = (byte3 >> 2) & 0x3F
if (channel & ch_mask):
# Format time string
if time_format == 0:
h_str = f"{state['h']:2d}"
else:
if state['h'] == 0:
h_str = "A12"
elif state['h'] < 12:
h_str = f"A{state['h']:2d}"
elif state['h'] == 12:
h_str = "P12"
else:
h_str = f"P{state['h']-12:2d}"
time_str = f"{h_str}:{minute:02d}:{s:02d}.{ms:03d}"
if channel == 3:
time_str += ch
time_str += ts_char
# Decide if we need to add timestamp based on config
add_time = self._should_add_timestamp(
timestamp, t_interval, ch_mask, s, minute, state['h'],
state, channel
)
if add_time:
result.extend(b'\r\n')
result.extend(time_str.encode('ascii'))
# Add the data byte
if data_format == 1:
# Hex format
result.extend(ts_char.encode('ascii'))
result.extend(f"{data_byte:x}".encode('ascii'))
else:
# ASCII format
if data_byte < 32:
result.extend(f"<{data_byte}>".encode('ascii'))
else:
result.append(data_byte)
state['day_stamp'] = False
state['last_ch'] = ch_mask
state['last_char'] = data_byte
index += 4
return result
def _should_add_timestamp(self, timestamp: int, t_interval: int,
ch_mask: int, s: int, minute: int, h: int,
state: dict, channel: int) -> bool:
"""Determine if timestamp should be added based on configuration"""
if timestamp == 1:
return True
if timestamp >= 50000:
if timestamp < 50256:
if t_interval == state['last_char']:
return True
elif timestamp == 2 or t_interval > 0:
t = s + 60 * minute + 3600 * h
should_add = False
if state['last_ch'] != ch_mask:
should_add = True
if t_interval > 0 and channel != 3:
should_add = False
if t_interval > 0:
t_last = state['last_ta'] if ch_mask == 1 else state['last_tb']
if (t - t_last) > t_interval:
should_add = True
if should_add:
if channel == 3:
state['last_ta'] = t
state['last_tb'] = t
elif ch_mask == 1:
state['last_ta'] = t
else:
state['last_tb'] = t
return True
return False
def download_via_pages(self, output_file: str = None, decode_controls: bool = True) -> Union[bytes, str]:
"""
Download data by fetching pages (like the preview does).
This bypasses the date range issue entirely.
Args:
output_file: Optional filename to save to
decode_controls: If True, decode <13>, <10> etc. to actual control characters
Returns:
Parsed log data as bytes or string (if decoded)
"""
print("Getting configuration...")
config = self.get_config()
print(f"Config: {config}")
all_data = bytearray()
# Get first page
print("\nFetching first page...")
url = f"{self.base_url}/page.xml?Page=0"
response = self.session.get(url, timeout=10)
response.raise_for_status()
first_page = bytearray(response.content)
print(f"First page: {len(first_page)} bytes")
if len(first_page) > 4:
all_data.extend(first_page)
# Get last page to see total size
print("Fetching last page...")
url = f"{self.base_url}/page.xml?Page=1"
response = self.session.get(url, timeout=10)
response.raise_for_status()
last_page = bytearray(response.content)
print(f"Last page: {len(last_page)} bytes")
# Now keep getting next pages until we get back to the last page
print("\nFetching all pages...")
current_page = first_page
page_count = 1
max_pages = 1000 # Safety limit
while page_count < max_pages:
url = f"{self.base_url}/page.xml?Page=3" # 3 = next page
response = self.session.get(url, timeout=10)
response.raise_for_status()
next_page = bytearray(response.content)
# Check if we've reached the end (data repeats)
if next_page == current_page or next_page == last_page:
print(f"Reached end after {page_count} pages")
break
if len(next_page) > 4:
all_data.extend(next_page)
page_count += 1
if page_count % 10 == 0:
print(f"Fetched {page_count} pages, {len(all_data)} bytes total...")
current_page = next_page
print(f"\nTotal raw data collected: {len(all_data)} bytes from {page_count} pages")
# Parse the data
print("Parsing data...")
file_mode = config['file_mode']
# Treat all_data as a single chunk
chunks = [all_data]
if file_mode == 2:
# Separate files for channel A and B
data_a = self.parse_log_data(chunks, config, channel=1)
data_b = self.parse_log_data(chunks, config, channel=2)
# Decode control characters if requested
if decode_controls:
data_a_decoded = self.decode_control_characters(data_a)
data_b_decoded = self.decode_control_characters(data_b)
if output_file:
with open(f"{output_file}_A.txt", 'w', encoding='utf-8') as f:
f.write(data_a_decoded)
with open(f"{output_file}_B.txt", 'w', encoding='utf-8') as f:
f.write(data_b_decoded)
print(f"\nSaved decoded data to {output_file}_A.txt and {output_file}_B.txt")
return data_a_decoded, data_b_decoded
else:
if output_file:
with open(f"{output_file}_A.txt", 'wb') as f:
f.write(data_a)
with open(f"{output_file}_B.txt", 'wb') as f:
f.write(data_b)
print(f"\nSaved raw data to {output_file}_A.txt and {output_file}_B.txt")
return data_a, data_b
else:
# Combined file
data = self.parse_log_data(chunks, config, channel=3)
# Decode control characters if requested
if decode_controls:
data_decoded = self.decode_control_characters(data)
if output_file:
with open(f"{output_file}.txt", 'w', encoding='utf-8') as f:
f.write(data_decoded)
print(f"\nSaved {len(data_decoded)} characters to {output_file}.txt")
return data_decoded
else:
if output_file:
with open(f"{output_file}.txt", 'wb') as f:
f.write(data)
print(f"\nSaved {len(data)} bytes to {output_file}.txt")
return data
# Usage example
if __name__ == "__main__":
downloader = RSLoggerDownloader("http://rslogger")
print("Downloading via page-by-page method...")
print("="*60 + "\n")
data = downloader.download_via_pages(
output_file="rslogger_data",
decode_controls=True # This will decode <13>, <10> etc.
)
if data and len(data) > 0:
print("\n" + "="*60)
print("Download complete!")
print("="*60)
print(f"\nFirst 1000 characters:")
print(data[:1000])
else:
print("\nNo data was downloaded.")

805 main.py

@@ -5,7 +5,408 @@ from tkinter import filedialog, messagebox
import subprocess
import os
from datetime import datetime
import platform
import requests
from typing import List, Union
try:
from openpyxl import load_workbook
XLSX_AVAILABLE = True
except ImportError:
XLSX_AVAILABLE = False
# RSLogger Downloader Classes
class RSLoggerDownloader:
def __init__(self, base_url: str):
"""
Initialize downloader for RS Logger device
Args:
base_url: Base URL like "http://rslogger" or "http://192.168.1.100"
"""
self.base_url = base_url.rstrip('/')
self.session = requests.Session()
def get_config(self) -> dict:
"""Get logger configuration parameters"""
url = f"{self.base_url}/logc.xml"
response = self.session.get(url, timeout=5)
response.raise_for_status()
parts = response.text.split('#')
config = {
'date_from': parts[0],
'date_to': parts[1],
'timestamp': int(parts[2]),
'file_mode': int(parts[3]),
'data_format': int(parts[4]),
'timestamp_char': chr(int(parts[5])),
'time_format': int(parts[6])
}
return config
def _get_progress(self, data: bytearray) -> int:
"""Extract progress percentage from end of data"""
length = len(data)
while length >= 2 and data[length-1] == 10 and data[length-2] == 13:
length -= 2
if length > 0:
return data[length - 1]
return 0
def _clear_endofline(self, data: bytearray) -> int:
"""Remove trailing carriage returns and get data length"""
length = len(data)
while length >= 2 and data[length-1] == 10 and data[length-2] == 13:
length -= 2
return length
def decode_control_characters(self, data: bytes) -> str:
"""
Decode the <13>, <10>, etc. control character sequences back to actual characters.
Args:
data: Raw bytes with <NN> sequences
Returns:
String with control characters properly decoded
"""
# Convert bytes to string
text = data.decode('ascii', errors='replace')
# Replace <NN> patterns with actual characters
def replace_control(match):
code = int(match.group(1))
return chr(code)
# Match <digits> pattern and replace with the actual character
decoded = re.sub(r'<(\d+)>', replace_control, text)
return decoded
def parse_log_data(self, chunks: List[bytearray], config: dict, channel: int = 3) -> bytearray:
"""
Parse raw log data chunks into readable format
Args:
chunks: List of raw data chunks
config: Configuration dictionary from get_config()
channel: Channel to parse (1=A, 2=B, 3=both)
Returns:
Parsed data as bytearray
"""
result = bytearray()
# State variables for parsing
state = {
'last_ch': 0,
'last_char': 0,
'day_stamp': True,
'last_ta': 0,
'last_tb': 0,
'h': 0,
'd': 0,
'm': 0,
'y': 0
}
timestamp = config['timestamp']
data_format = config['data_format']
ts_char = config['timestamp_char']
time_format = config['time_format']
# Determine time interval
t_interval = 0
if timestamp >= 50000:
t_interval = timestamp - 50000
elif timestamp > 1000:
t_interval = timestamp - 1000
elif timestamp > 2:
timestamp = 2
for chunk_idx, chunk in enumerate(chunks):
length = self._clear_endofline(chunk)
if length <= 0:
continue
# Remove progress byte at end
length -= 1
if length <= 0:
continue
parsed = self._parse_chunk(
chunk, length, channel, timestamp, t_interval,
data_format, ts_char, time_format, state
)
result.extend(parsed)
return result
def _parse_chunk(self, data: bytearray, length: int, channel: int,
timestamp: int, t_interval: int, data_format: int,
ts_char: str, time_format: int, state: dict) -> bytearray:
"""Parse a single chunk of data"""
result = bytearray()
index = 0
while index < length:
if (length - index) < 4:
break
byte0 = data[index]
byte1 = data[index + 1]
byte2 = data[index + 2]
byte3 = data[index + 3]
# Check if this is a timestamp marker (high bit set)
if byte0 & 0x80:
# Date/time record
state['h'] = byte0 & 0x7F
dtmp = byte1
mtmp = byte2
ytmp = byte3 + 2000
if state['d'] != dtmp or state['m'] != mtmp or state['y'] != ytmp:
state['d'] = dtmp
state['m'] = mtmp
state['y'] = ytmp
if timestamp != 0:
if len(result) > 0:
result.extend(b'\r\n')
date_str = f"[{ytmp}-{mtmp:02d}-{dtmp:02d}]"
result.extend(date_str.encode('ascii'))
state['last_ta'] = 0
state['last_tb'] = 0
state['last_ch'] = 0
state['day_stamp'] = True
else:
# Data record
ch = 'B' if (byte0 & 0x40) else 'A'
ch_mask = 2 if ch == 'B' else 1
minute = byte0 & 0x3F
data_byte = byte1
tu16 = byte2 | (byte3 << 8)
ms = tu16 & 0x3FF
s = (byte3 >> 2) & 0x3F
if (channel & ch_mask):
# Format time string
if time_format == 0:
h_str = f"{state['h']:2d}"
else:
if state['h'] == 0:
h_str = "A12"
elif state['h'] < 12:
h_str = f"A{state['h']:2d}"
elif state['h'] == 12:
h_str = "P12"
else:
h_str = f"P{state['h']-12:2d}"
time_str = f"{h_str}:{minute:02d}:{s:02d}.{ms:03d}"
if channel == 3:
time_str += ch
time_str += ts_char
# Decide if we need to add timestamp based on config
add_time = self._should_add_timestamp(
timestamp, t_interval, ch_mask, s, minute, state['h'],
state, channel
)
if add_time:
result.extend(b'\r\n')
result.extend(time_str.encode('ascii'))
# Add the data byte
if data_format == 1:
# Hex format
result.extend(ts_char.encode('ascii'))
result.extend(f"{data_byte:x}".encode('ascii'))
else:
# ASCII format
if data_byte < 32:
result.extend(f"<{data_byte}>".encode('ascii'))
else:
result.append(data_byte)
state['day_stamp'] = False
state['last_ch'] = ch_mask
state['last_char'] = data_byte
index += 4
return result
def _should_add_timestamp(self, timestamp: int, t_interval: int,
ch_mask: int, s: int, minute: int, h: int,
state: dict, channel: int) -> bool:
"""Determine if timestamp should be added based on configuration"""
if timestamp == 1:
return True
if timestamp >= 50000:
if timestamp < 50256:
if t_interval == state['last_char']:
return True
elif timestamp == 2 or t_interval > 0:
t = s + 60 * minute + 3600 * h
should_add = False
if state['last_ch'] != ch_mask:
should_add = True
if t_interval > 0 and channel != 3:
should_add = False
if t_interval > 0:
t_last = state['last_ta'] if ch_mask == 1 else state['last_tb']
if (t - t_last) > t_interval:
should_add = True
if should_add:
if channel == 3:
state['last_ta'] = t
state['last_tb'] = t
elif ch_mask == 1:
state['last_ta'] = t
else:
state['last_tb'] = t
return True
return False
def download_via_pages(self, output_file: str = None, decode_controls: bool = True) -> Union[bytes, str]:
"""
Download data by fetching pages (like the preview does).
This bypasses the date range issue entirely.
Args:
output_file: Optional filename to save to
decode_controls: If True, decode <13>, <10> etc. to actual control characters
Returns:
Parsed log data as bytes or string (if decoded)
"""
print("Getting configuration...")
config = self.get_config()
print(f"Config: {config}")
all_data = bytearray()
# Get first page
print("\nFetching first page...")
url = f"{self.base_url}/page.xml?Page=0"
response = self.session.get(url, timeout=10)
response.raise_for_status()
first_page = bytearray(response.content)
print(f"First page: {len(first_page)} bytes")
if len(first_page) > 4:
all_data.extend(first_page)
# Get last page to see total size
print("Fetching last page...")
url = f"{self.base_url}/page.xml?Page=1"
response = self.session.get(url, timeout=10)
response.raise_for_status()
last_page = bytearray(response.content)
print(f"Last page: {len(last_page)} bytes")
# Now keep getting next pages until we get back to the last page
print("\nFetching all pages...")
current_page = first_page
page_count = 1
max_pages = 1000 # Safety limit
while page_count < max_pages:
url = f"{self.base_url}/page.xml?Page=3" # 3 = next page
response = self.session.get(url, timeout=10)
response.raise_for_status()
next_page = bytearray(response.content)
# Check if we've reached the end (data repeats)
if next_page == current_page or next_page == last_page:
print(f"Reached end after {page_count} pages")
break
if len(next_page) > 4:
all_data.extend(next_page)
page_count += 1
if page_count % 10 == 0:
print(f"Fetched {page_count} pages, {len(all_data)} bytes total...")
current_page = next_page
print(f"\nTotal raw data collected: {len(all_data)} bytes from {page_count} pages")
# Parse the data
print("Parsing data...")
file_mode = config['file_mode']
# Treat all_data as a single chunk
chunks = [all_data]
if file_mode == 2:
# Separate files for channel A and B
data_a = self.parse_log_data(chunks, config, channel=1)
data_b = self.parse_log_data(chunks, config, channel=2)
# Decode control characters if requested
if decode_controls:
data_a_decoded = self.decode_control_characters(data_a)
data_b_decoded = self.decode_control_characters(data_b)
if output_file:
with open(f"{output_file}_A.txt", 'w', encoding='utf-8') as f:
f.write(data_a_decoded)
with open(f"{output_file}_B.txt", 'w', encoding='utf-8') as f:
f.write(data_b_decoded)
print(f"\nSaved decoded data to {output_file}_A.txt and {output_file}_B.txt")
return data_a_decoded, data_b_decoded
else:
if output_file:
with open(f"{output_file}_A.txt", 'wb') as f:
f.write(data_a)
with open(f"{output_file}_B.txt", 'wb') as f:
f.write(data_b)
print(f"\nSaved raw data to {output_file}_A.txt and {output_file}_B.txt")
return data_a, data_b
else:
# Combined file
data = self.parse_log_data(chunks, config, channel=3)
# Decode control characters if requested
if decode_controls:
data_decoded = self.decode_control_characters(data)
if output_file:
with open(f"{output_file}.txt", 'w', encoding='utf-8') as f:
f.write(data_decoded)
print(f"\nSaved {len(data_decoded)} characters to {output_file}.txt")
return data_decoded
else:
if output_file:
with open(f"{output_file}.txt", 'wb') as f:
f.write(data)
print(f"\nSaved {len(data)} bytes to {output_file}.txt")
return data
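A minimal usage sketch of the method above (the host name matches the one used later in the GUI; the file stem is an illustrative assumption, not a value fixed by this diff):

    # Hypothetical standalone use of the downloader (illustration only).
    downloader = RSLoggerDownloader("http://rslogger")
    log_text = downloader.download_via_pages(output_file="scale_log", decode_controls=True)
    # When the logger's file_mode is 2, this returns a (channel A, channel B) pair
    # and writes scale_log_A.txt / scale_log_B.txt instead of a single scale_log.txt.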
# Log parsing and conversion functions
def parse_logs(log_text):
    lines = log_text.splitlines()
    if lines and '=~' in lines[0]:
@@ -32,7 +433,7 @@ def parse_logs(log_text):
            try:
                dt = datetime.strptime(ts_line, '%I:%M %p %m/%d/%y')
                excel_ts = dt.strftime('%m/%d/%Y %I:%M %p')
                pairs.append((weight, units, excel_ts, dt))
                print((weight, units, excel_ts))
            except ValueError:
                i += 1
@@ -45,32 +446,212 @@ def parse_logs(log_text):
    return pairs
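For orientation, parse_logs expects a weight line followed by a timestamp line. Reconstructed from the strptime format above, a record presumably looks like the following (the exact surrounding layout is an assumption, since part of the parser is elided from this hunk):

    3780 lb
    10:18 PM 09/17/25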
def remove_duplicates(pairs):
"""Remove duplicate entries based on weight (within 20) and timestamp (within 1 minute)
Returns list of tuples where each tuple contains all the duplicate entries"""
if not pairs:
return []
# Group entries that are duplicates
deduplicated = []
i = 0
while i < len(pairs):
weight, units, excel_ts, dt = pairs[i]
duplicate_group = [(weight, units, excel_ts)]
# Look ahead for duplicates
j = i + 1
while j < len(pairs):
next_weight, next_units, next_excel_ts, next_dt = pairs[j]
# Check if it's a duplicate:
# - Weight within 20
# - Timestamp within 1 minute
weight_diff = abs(weight - next_weight)
time_diff = abs((dt - next_dt).total_seconds())
if weight_diff <= 20 and time_diff <= 60:
# It's a duplicate, add to group
duplicate_group.append((next_weight, next_units, next_excel_ts))
j += 1
else:
break
# Add the group as a tuple
deduplicated.append(tuple(duplicate_group))
i = j if j > i + 1 else i + 1
return deduplicated
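A small worked example of the grouping rule (values invented for illustration; the tuples are in the four-element form parse_logs now emits):

    from datetime import datetime
    fmt = '%m/%d/%Y %I:%M %p'
    pairs = [
        (3780, 'lb', '09/17/2025 10:18 PM', datetime.strptime('09/17/2025 10:18 PM', fmt)),
        (3790, 'lb', '09/17/2025 10:18 PM', datetime.strptime('09/17/2025 10:18 PM', fmt)),  # within 20 lb and 1 min
        (1640, 'lb', '09/17/2025 10:19 PM', datetime.strptime('09/17/2025 10:19 PM', fmt)),
    ]
    remove_duplicates(pairs)
    # -> [((3780, 'lb', '09/17/2025 10:18 PM'), (3790, 'lb', '09/17/2025 10:18 PM')),
    #     ((1640, 'lb', '09/17/2025 10:19 PM'),)]

Note that candidates are compared against the first entry of the current group, so a drift of more than 20 lb from the group's first weight starts a new group.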
def write_csv1(pairs, filename):
    """Write sequential CSV with duplicates pushed to the right"""
    with open(filename, 'w', newline='') as f:
        writer = csv.writer(f)
        # Find max number of duplicates to determine column count
        max_dups = max(len(group) for group in pairs) if pairs else 1  # guard empty input, as in write_csv2
# Create headers: WEIGHT, UNITS, TIME repeated for each duplicate
headers = []
for i in range(max_dups):
suffix = f"_{i+1}" if i > 0 else ""
headers.extend([f'WEIGHT{suffix}', f'UNITS{suffix}', f'TIME{suffix}'])
writer.writerow(headers)
# Write data rows
for group in pairs:
row = []
for weight, units, timestamp in group:
row.extend([weight, units, timestamp])
# Pad with empty strings if this group has fewer duplicates than max
while len(row) < max_dups * 3:
row.append('')
writer.writerow(row)
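To make the layout concrete, a hypothetical sequential CSV with max_dups == 2 (values invented for illustration) would be:

    WEIGHT,UNITS,TIME,WEIGHT_2,UNITS_2,TIME_2
    3780,lb,09/17/2025 10:18 PM,3790,lb,09/17/2025 10:18 PM
    1640,lb,09/17/2025 10:19 PM,,,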
def write_csv2(pairs, filename):
    """Write joined CSV with duplicates pushed to the right"""
    with open(filename, 'w', newline='') as f:
        writer = csv.writer(f)
        # Find max number of duplicates to determine column count
        max_dups = max(len(group) for group in pairs) if pairs else 1
        # Create headers for gross and tare, repeated for duplicates
        headers = []
        for i in range(max_dups):
            suffix = f"_{i+1}" if i > 0 else ""
            headers.extend([f'GROSS_WT{suffix}', f'GROSS_UNITS{suffix}', f'GROSS_T{suffix}'])
        for i in range(max_dups):
            suffix = f"_{i+1}" if i > 0 else ""
            headers.extend([f'TARE_WT{suffix}', f'TARE_UNITS{suffix}', f'TARE_T{suffix}'])
        # Add NET columns at the end
        for i in range(max_dups):
            suffix = f"_{i+1}" if i > 0 else ""
            headers.extend([f'NET_WT{suffix}', f'NET_UNITS{suffix}'])
        writer.writerow(headers)
        # Process pairs (gross/tare)
        for j in range(0, len(pairs), 2):
            row = []
            # Write all GROSS entries
            if j < len(pairs):
                gross_group = pairs[j]
                for weight, units, timestamp in gross_group:
                    row.extend([weight, units, timestamp])
                # Pad gross to max_dups
                while len(row) < max_dups * 3:
                    row.append('')
            # Write all TARE entries
            if j + 1 < len(pairs):
                tare_group = pairs[j + 1]
                for weight, units, timestamp in tare_group:
                    row.extend([weight, units, timestamp])
                # Pad tare to max_dups
                while len(row) < max_dups * 6:
                    row.append('')
                # Calculate NET for each duplicate pair
                gross_group = pairs[j]
                for k in range(max_dups):
                    if k < len(gross_group) and k < len(tare_group):
                        gross_weight = gross_group[k][0]
                        gross_units = gross_group[k][1]
                        tare_weight = tare_group[k][0]
                        tare_units = tare_group[k][1]
                        if gross_units == tare_units:
                            net = gross_weight - tare_weight
                            net_units = tare_units
                        else:
                            net = 'UNIT MISMATCH'
                            net_units = 'MISMATCH'
                        row.extend([net, net_units])
                    else:
                        row.extend(['', ''])
            else:
                # Odd number of items, pad tare and net
                while len(row) < max_dups * 6:
                    row.append('')
                for k in range(max_dups):
                    row.extend(['', ''])
            writer.writerow(row)
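Likewise (illustrative assumption), with no duplicates (max_dups == 1) the joined header row is:

    GROSS_WT,GROSS_UNITS,GROSS_T,TARE_WT,TARE_UNITS,TARE_T,NET_WT,NET_UNITS

Groups are consumed pairwise (even index = gross, odd index = tare), and NET is filled only where the k-th gross and tare entries both exist and their units match.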
def write_xlsx(pairs, output_filename, template_path='template.xlsx'):
"""Write sequential data to XLSX file using template with duplicates pushed to the right"""
if not XLSX_AVAILABLE:
raise ImportError("openpyxl is required for XLSX export. Install with: pip install openpyxl")
if not os.path.exists(template_path):
raise FileNotFoundError(f"Template file not found: {template_path}")
# Load the template workbook (data_only=False preserves formulas)
wb = load_workbook(template_path, data_only=False)
# Check if SEQUENTIAL sheet exists
if 'SEQUENTIAL' not in wb.sheetnames:
raise ValueError("Template does not contain a 'SEQUENTIAL' sheet")
# Get the SEQUENTIAL sheet
ws = wb['SEQUENTIAL']
# Clear existing data (starting from row 2, assuming row 1 has headers)
# First, delete all rows below the header
if ws.max_row > 1:
ws.delete_rows(2, ws.max_row - 1)
# Find max number of duplicates
max_dups = max(len(group) for group in pairs) if pairs else 1
# Update headers if needed (row 1)
col = 1
for i in range(max_dups):
suffix = f"_{i+1}" if i > 0 else ""
ws.cell(row=1, column=col, value=f'WEIGHT{suffix}')
ws.cell(row=1, column=col+1, value=f'UNITS{suffix}')
ws.cell(row=1, column=col+2, value=f'TIME{suffix}')
col += 3
# Write data starting from row 2
for row_idx, group in enumerate(pairs, start=2):
col = 1
for weight, units, timestamp in group:
ws.cell(row=row_idx, column=col, value=weight)
ws.cell(row=row_idx, column=col+1, value=units)
ws.cell(row=row_idx, column=col+2, value=timestamp)
col += 3
# Set COMBINED as the active sheet (default sheet when opened)
if 'COMBINED' in wb.sheetnames:
wb.active = wb.sheetnames.index('COMBINED')
# Save to new filename (never overwrite template)
wb.save(output_filename)
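A minimal end-to-end sketch (file names are assumptions; requires openpyxl plus a template.xlsx containing SEQUENTIAL and COMBINED sheets next to the script):

    # Hypothetical call chain (illustration only).
    with open('scale_log.txt', 'r') as f:
        groups = remove_duplicates(parse_logs(f.read()))
    write_xlsx(groups, 'scale_log.xlsx', template_path='template.xlsx')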
def show_files_in_explorer(files):
"""Open file explorer and highlight the generated files"""
if not files:
return
# Get the directory of the first file
directory = os.path.dirname(os.path.abspath(files[0]))
system = platform.system()
if system == 'Windows':
# On Windows, use explorer with /select to highlight the file
# If multiple files, just select the first one
subprocess.run(['explorer', '/select,', os.path.abspath(files[0])])
elif system == 'Darwin': # macOS
# On Mac, use 'open' with -R to reveal in Finder
subprocess.run(['open', '-R', os.path.abspath(files[0])])
else: # Linux and others
# On Linux, just open the directory
subprocess.run(['xdg-open', directory])
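For example (hypothetical paths):

    # Reveals the first file in Explorer/Finder, or opens the folder on Linux.
    show_files_in_explorer(['scale_log.sequential.csv', 'scale_log.joined.csv'])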
def run_update_script():
    try:
@@ -85,50 +666,196 @@ def run_update_script():
    except Exception as e:
        messagebox.showerror("Error", f"Failed to run update.sh: {str(e)}")
def convert_local_file():
    """Convert a local file selected by the user"""
    input_file = filedialog.askopenfilename(
        title="Select Log File to Convert",
        filetypes=[("Text files", "*.txt"), ("All files", "*.*")]
    )
    if not input_file:
        return  # User cancelled
    generate_csv = csv_var.get()
    generate_xlsx = xlsx_var.get()
    if not generate_csv and not generate_xlsx:
        messagebox.showerror("Error", "Please select at least one output format (CSV or XLSX).")
        return
    # Auto-generate output filenames
    base = os.path.splitext(input_file)[0]
    output_file1 = f"{base}.sequential.csv"
    output_file2 = f"{base}.joined.csv"
    output_xlsx = f"{base}.xlsx"
    generated_files = []
    error_messages = []
    try:
        with open(input_file, 'r') as f:
            log_text = f.read()
        pairs = parse_logs(log_text)
        # Remove duplicates
        pairs = remove_duplicates(pairs)
        # Generate CSV files if checked
        if generate_csv:
            try:
                write_csv1(pairs, output_file1)
                generated_files.append(output_file1)
                write_csv2(pairs, output_file2)
                generated_files.append(output_file2)
            except Exception as e:
                error_messages.append(f"CSV export failed: {str(e)}")
        # Generate XLSX file if checked
        if generate_xlsx:
            try:
                write_xlsx(pairs, output_xlsx)
                generated_files.append(output_xlsx)
            except ImportError as e:
                error_messages.append(f"XLSX export skipped: {str(e)}")
            except Exception as e:
                error_messages.append(f"XLSX export failed: {str(e)}")
        # Build success message
        if generated_files:
            files_list = "\n".join(generated_files)
            error_info = ""
            if error_messages:
                error_info = "\n\nWarnings:\n" + "\n".join(error_messages)
            # Custom dialog with "Show the files" button
            response = messagebox.askquestion("Success",
                f"Files generated:\n{files_list}{error_info}\n\nShow the files?",
                icon='info')
            if response == 'yes':
                show_files_in_explorer(generated_files)
        else:
            messagebox.showerror("Error", "No files were generated.\n\n" + "\n".join(error_messages))
    except Exception as e:
        messagebox.showerror("Error", f"An error occurred: {str(e)}")
def download_and_convert():
"""Download from RS Logger and convert"""
# Ask for destination file
output_file = filedialog.asksaveasfilename(
title="Save Downloaded Log As",
defaultextension=".txt",
filetypes=[("Text files", "*.txt"), ("All files", "*.*")]
)
if not output_file:
return # User cancelled
generate_csv = csv_var.get()
generate_xlsx = xlsx_var.get()
if not generate_csv and not generate_xlsx:
messagebox.showerror("Error", "Please select at least one output format (CSV or XLSX).")
return
try:
# Download from RS Logger
messagebox.showinfo("Downloading", "Connecting to RS Logger at http://rslogger\nThis may take a moment...")
downloader = RSLoggerDownloader("http://rslogger")
log_text = downloader.download_via_pages(
output_file=os.path.splitext(output_file)[0],
decode_controls=True
)
if not log_text:
messagebox.showerror("Error", "No data was downloaded from RS Logger")
return
# Now convert the downloaded data
base = os.path.splitext(output_file)[0]
output_file1 = f"{base}.sequential.csv"
output_file2 = f"{base}.joined.csv"
output_xlsx = f"{base}.xlsx"
generated_files = [f"{base}.txt"] # The downloaded txt file
error_messages = []
pairs = parse_logs(log_text)
# Remove duplicates
pairs = remove_duplicates(pairs)
# Generate CSV files if checked
if generate_csv:
try:
write_csv1(pairs, output_file1)
generated_files.append(output_file1)
write_csv2(pairs, output_file2)
generated_files.append(output_file2)
except Exception as e:
error_messages.append(f"CSV export failed: {str(e)}")
# Generate XLSX file if checked
if generate_xlsx:
try:
write_xlsx(pairs, output_xlsx)
generated_files.append(output_xlsx)
except ImportError as e:
error_messages.append(f"XLSX export skipped: {str(e)}")
except Exception as e:
error_messages.append(f"XLSX export failed: {str(e)}")
# Build success message
if generated_files:
files_list = "\n".join(generated_files)
error_info = ""
if error_messages:
error_info = "\n\nWarnings:\n" + "\n".join(error_messages)
# Custom dialog with "Show the files" button
response = messagebox.askquestion("Success",
f"Downloaded and converted!\n\nFiles generated:\n{files_list}{error_info}\n\nShow the files?",
icon='info')
if response == 'yes':
show_files_in_explorer(generated_files)
else:
messagebox.showerror("Error", "No files were generated.\n\n" + "\n".join(error_messages))
except Exception as e:
messagebox.showerror("Error", f"Download failed: {str(e)}")
# GUI Setup
root = tk.Tk()
root.title("RS Logger Converter")
root.geometry("500x220")
csv_var = tk.BooleanVar(value=False)   # CSV unchecked by default
xlsx_var = tk.BooleanVar(value=True)   # XLSX checked by default
# Title label
title_label = tk.Label(root, text="RS Logger Data Converter", font=("Arial", 16, "bold"))
title_label.pack(pady=10)
# Output format checkboxes
format_frame = tk.Frame(root)
format_frame.pack(pady=5, padx=10, fill='x')
tk.Label(format_frame, text="Output Formats:").pack(side='left')
tk.Checkbutton(format_frame, text="CSV", variable=csv_var).pack(side='left', padx=10)
tk.Checkbutton(format_frame, text="XLSX", variable=xlsx_var).pack(side='left', padx=10)
# Convert local file button
tk.Button(root, text="CONVERT LOCAL FILE", command=convert_local_file,
          font=("Arial", 12), relief="raised", bg="#2196F3", fg="white").pack(pady=5, padx=10, fill='x')
# Download and convert button
tk.Button(root, text="DOWNLOAD FROM RS LOGGER", command=download_and_convert,
          font=("Arial", 12), relief="raised", bg="#4CAF50", fg="white").pack(pady=5, padx=10, fill='x')
# Update script button
tk.Button(root, text="Update this Application", command=run_update_script,
          font=("Arial", 10), relief="raised").pack(pady=5, padx=10, fill='x')
root.mainloop()

BIN template.xlsx (new file; binary file not shown)