From 74920764e701295e1d4c2e5e10b127f09fe497a5 Mon Sep 17 00:00:00 2001 From: Holger Frey Date: Wed, 31 Aug 2022 16:14:35 +0200 Subject: [PATCH] added mkdocs for documentation --- .gitignore | 7 +-- Makefile | 13 ++++- docs/explanation.md | 18 +++++++ docs/how-to-guides.md | 6 +++ docs/index.md | 20 ++++++++ docs/reference.md | 9 ++++ docs/tutorials.md | 17 +++++++ mkdocs.yml | 17 +++++++ pyproject.toml | 4 ++ src/sensospot_parser/parameters.py | 35 +++++++++---- src/sensospot_parser/parser.py | 81 +++++++++++++++++++++--------- 11 files changed, 189 insertions(+), 38 deletions(-) create mode 100644 docs/explanation.md create mode 100644 docs/how-to-guides.md create mode 100644 docs/index.md create mode 100644 docs/reference.md create mode 100644 docs/tutorials.md create mode 100644 mkdocs.yml diff --git a/.gitignore b/.gitignore index 77de0ee..a0556db 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ -# ---> Python +# ---> Python # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] @@ -53,8 +53,9 @@ coverage.xml # Django stuff: *.log -# Sphinx documentation +# documentation docs/_build/ +site/ # PyBuilder target/ @@ -66,4 +67,4 @@ target/ *.h5 # Editors -.vscode/ \ No newline at end of file +.vscode/ diff --git a/Makefile b/Makefile index 3493d34..866a958 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: clean coverage coverall devenv install lint prepareenv repo test testall testfunctional tox +.PHONY: clean coverage coverall docs devenv install lint prepareenv repo serve-docs test testall testfunctional tox .DEFAULT_GOAL := help define BROWSER_PYSCRIPT @@ -29,7 +29,7 @@ BROWSER := python -c "$$BROWSER_PYSCRIPT" help: @python -c "$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST) -clean: clean-build clean-pyc clean-test ## remove all build, test, coverage and Python artifacts +clean: clean-build clean-docs clean-pyc clean-test ## remove all build, test, coverage and Python artifacts clean-build: ## remove build artifacts rm -fr build/ @@ -38,6 
+38,9 @@ clean-build: ## remove build artifacts find . -name '*.egg-info' -exec rm -fr {} + find . -name '*.egg' -exec rm -f {} + +clean-docs: ## remove documentation artifacts + rm -fr site/ + clean-pyc: ## remove Python file artifacts find . -name '*.pyc' -exec rm -f {} + find . -name '*.pyo' -exec rm -f {} + @@ -79,6 +82,12 @@ coverall: lint ## full test suite, check code coverage and open coverage report tox: ## run fully isolated tests with tox tox +docs: ## build the documentation using mkdocs + mkdocs build + +serve-docs: docs ## build the documentation and serve them in a web server + mkdocs serve + install: ## install updated project.toml with flint flit install --pth-file diff --git a/docs/explanation.md b/docs/explanation.md new file mode 100644 index 0000000..71077ee --- /dev/null +++ b/docs/explanation.md @@ -0,0 +1,17 @@ +# Explanation + +This part of the project documentation focuses on an +**understanding-oriented** approach. You'll get a +chance to read about the background of the project, +as well as reasoning about how it was implemented. + +> **Note:** Expand this section by considering the +> following points: + +- Give context and background on your library +- Explain why you created it +- Provide multiple examples and approaches of how + to work with it +- Help the reader make connections +- Avoid writing instructions or technical descriptions + here diff --git a/docs/how-to-guides.md b/docs/how-to-guides.md new file mode 100644 index 0000000..54df9be --- /dev/null +++ b/docs/how-to-guides.md @@ -0,0 +1,6 @@ +# How-To Guides + +This part of the project documentation focuses on a +**problem-oriented** approach. You'll tackle common +tasks that you might have, with the help of the code +provided in this project. 
diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 0000000..88e9d62 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,20 @@ +# SensoSpot Parser Documentation + +This site contains the project documentation for the +`sensospot_parser` project. + + +## Table Of Contents + +The documentation follows the best practice for +project documentation as described by Daniele Procida +in the [Diátaxis documentation framework](https://diataxis.fr/) +and consists of four separate parts: + +1. [Tutorials](tutorials.md) +2. [How-To Guides](how-to-guides.md) +3. [Reference](reference.md) +4. [Explanation](explanation.md) + +Quickly find what you're looking for depending on +your use case by looking at the different pages. diff --git a/docs/reference.md b/docs/reference.md new file mode 100644 index 0000000..c9751cb --- /dev/null +++ b/docs/reference.md @@ -0,0 +1,9 @@ +# Reference + +This part of the project documentation focuses on +an **information-oriented** approach. Use it as a +reference for the technical implementation of the +`sensospot_parser` project code. + + +::: sensospot_parser.parser diff --git a/docs/tutorials.md b/docs/tutorials.md new file mode 100644 index 0000000..fba5682 --- /dev/null +++ b/docs/tutorials.md @@ -0,0 +1,18 @@ +# Tutorials + +This part of the project documentation focuses on a +**learning-oriented** approach. You'll learn how to +get started with the code in this project. 
+ +> **Note:** Expand this section by considering the +> following points: + +- Help newcomers with getting started +- Teach readers about your library by making them + write code +- Inspire confidence through examples that work for + everyone, repeatably +- Give readers an immediate sense of achievement +- Show concrete examples, no abstractions +- Provide the minimum necessary explanation +- Avoid any distractions diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 0000000..bc9f768 --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,17 @@ +site_name: Sensospot Parser Docs + +nav: + - Introduction: index.md + - Tutorials: tutorials.md + - How-To Guides: how-to-guides.md + - Reference: reference.md + - Explanation: explanation.md + +repo_url: https://github.com/example/repository/ + +theme: + name: readthedocs + highlightjs: true + +plugins: + - mkdocstrings diff --git a/pyproject.toml b/pyproject.toml index 1431d02..55072e4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -53,6 +53,10 @@ dev = [ "keyring", "pre-commit", ] +docs = [ + "mkdocs", + "mkdocstrings[python]", +] [tool.black] line-length = 79 diff --git a/src/sensospot_parser/parameters.py b/src/sensospot_parser/parameters.py index 999d67e..2beb04c 100644 --- a/src/sensospot_parser/parameters.py +++ b/src/sensospot_parser/parameters.py @@ -19,8 +19,11 @@ PathLike = Union[str, pathlib.Path] def _search_params_file(folder: PathLike) -> Optional[pathlib.Path]: """searches for a exposure settings file in a folder - folder: directory to search - returns: the path to the settings file or None + Args: + folder: directory to search + + Returns: + the path to the settings file or None """ folder_path = pathlib.Path(folder) params_folder = folder_path / "Parameters" @@ -36,8 +39,11 @@ def _search_params_file(folder: PathLike) -> Optional[pathlib.Path]: def _get_channel_data(channel_node: ElementType) -> Dict[str, Any]: """parses the information from an xml node of the channel settings - channel_node: the xml 
node of the channel settings + + Returns: + dict with the information """ # child.tag == "ChannelConfig1" exposure_id = int(channel_node.tag[-1]) @@ -56,8 +62,11 @@ def _get_channel_data(channel_node: ElementType) -> Dict[str, Any]: def _parse_measurement_params(params_file: PathLike) -> pandas.DataFrame: """parses the cannel informations from a settings file - params_file: path to the settings file - returns: pandas DataFrame with the parsed information + Args: + params_file: path to the settings file + + Returns: + pandas data frame with the parsed information """ file_path = pathlib.Path(params_file) with file_path.open("r") as file_handle: @@ -69,8 +78,11 @@ def _parse_measurement_params(params_file: PathLike) -> pandas.DataFrame: def get_measurement_params(folder: PathLike) -> Optional[pandas.DataFrame]: """searches the settings file and returns the parameters - folder: path to the folder with the measurement data - returns: pandas DataFrame with the parsed parameters or None + Args: + folder: path to the folder with the measurement data + + Returns: + pandas data frame with the parsed parameters or None """ params_file = _search_params_file(folder) if params_file is not None: @@ -89,8 +101,11 @@ def add_measurement_parameters( If the parameters could not be found, parsed or do not match up with the measurement data, the additional collumns will contain NaN. 
- measurement: the parsed measurement data - returns: the measurement data with parameters added + Args: + measurement: the parsed measurement data + + Returns: + the measurement data with parameters added """ params = get_measurement_params(folder) if params is not None: diff --git a/src/sensospot_parser/parser.py b/src/sensospot_parser/parser.py index 284139e..f5f278f 100644 --- a/src/sensospot_parser/parser.py +++ b/src/sensospot_parser/parser.py @@ -32,8 +32,11 @@ def _guess_decimal_separator(file_handle: TextIO) -> str: This is a very crude method, but depending on the language setting, different decimal separators may be used. - file_handle: a file handle to an opened csv file - returns: either '.' or ',' as a decimal separator + Args: + file_handle: a file handle to an opened csv file + + Returns: + either '.' or ',' as a decimal separator """ file_handle.seek(0) headers = next(file_handle) # noqa: F841 @@ -48,8 +51,11 @@ def _parse_csv(data_file: PathLike) -> pandas.DataFrame: Tries to guess the decimal separator from the file contents - data_file: path to the csv file - returns: pandas DataFrame with the parsed data + Args: + data_file: path to the csv file + + Returns: + pandas data frame with the parsed data """ data_path = pathlib.Path(data_file) with data_path.open("r") as handle: @@ -61,8 +67,11 @@ def _parse_csv(data_file: PathLike) -> pandas.DataFrame: def _extract_measurement_info(data_file: PathLike) -> FileInfo: """extract measurement meta data from a file name - data_file: path to the csv data file - returns: named tuple FileInfo with parsed metadata + Args: + data_file: path to the csv data file + + Returns: + named tuple FileInfo with parsed metadata """ data_path = pathlib.Path(data_file) *rest, well, exposure = data_path.stem.rsplit("_", 2) # noqa: F841 @@ -78,8 +87,11 @@ def _extract_measurement_info(data_file: PathLike) -> FileInfo: def _cleanup_data_columns(data_frame: pandas.DataFrame) -> pandas.DataFrame: """renames some data columns 
for consistency and drops unused columns - data_frame: pandas DataFrame with parsed measurement data - returns: pandas DataFrame, column names cleaned up + Args: + data_frame: pandas DataFrame with parsed measurement data + + Returns: + pandas DataFrame, column names cleaned up """ renamed = data_frame.rename(columns=columns.CSV_RENAME_MAP) surplus_columns = set(renamed.columns) - columns.PARSED_DATA_COLUMN_SET @@ -91,9 +103,14 @@ def parse_file(data_file: PathLike) -> pandas.DataFrame: will race a ValueError, if metadata could not be extracted - data_file: path to the csv data file - raises: ValueError if metadata could not be extracted - returns: pandas DataFrame with the parsed data + Args: + data_file: path to the csv data file + + Returns: + pandas data frame with the parsed data + + Raises: + ValueError: if metadata could not be extracted """ data_path = pathlib.Path(data_file).resolve() measurement_info = _extract_measurement_info(data_path) @@ -112,8 +129,13 @@ def parse_file(data_file: PathLike) -> pandas.DataFrame: def _parse_file_silenced(data_file: PathLike) -> Optional[pandas.DataFrame]: """parses one data file and adds metadata - data_file: path to the csv data file - returns: pandas DataFrame with the parsed data or None on error + Safety checks are suppressed + + Args: + data_file: path to the csv data file + + Returns: + pandas data frame with the parsed data or None on error """ try: return parse_file(data_file) @@ -124,8 +146,10 @@ def _parse_file_silenced(data_file: PathLike) -> Optional[pandas.DataFrame]: def parse_multiple_files(file_list: Sequence[PathLike]) -> pandas.DataFrame: """parses a list of file paths to one combined data frame - file_list: collection of paths to csv data files - returns: pandas DataFrame with all parsed data combined + Args: + file_list: collection of paths to csv data files + Returns: + pandas data frame with all parsed data combined """ if not file_list: raise ValueError("Empty file list provided") @@ -141,8 +165,11 
@@ def parse_multiple_files(file_list: Sequence[PathLike]) -> pandas.DataFrame: def find_csv_files(folder: PathLike) -> Sequence[pathlib.Path]: """returns all csv files in a folder - folder: path to the folder to search for csv files - returns: iterator with the found csv files + Args: + folder: path to the folder to search for csv files + + Returns: + iterator with the found csv files """ folder_path = pathlib.Path(folder) files = (item for item in folder_path.iterdir() if item.is_file()) @@ -153,9 +180,14 @@ def find_csv_files(folder: PathLike) -> Sequence[pathlib.Path]: def _sanity_check(data_frame: pandas.DataFrame) -> pandas.DataFrame: """checks some basic constrains of a combined data frame - data_frame: measurement data - raises: ValueError if basic constrains are not met - returns: pandas DataFrame + Args: + data_frame: measurement data + + Returns: + a pandas DataFrame + + Raises: + ValueError: if basic constraints are not met """ field_rows = len(data_frame[columns.WELL_ROW].unique()) field_cols = len(data_frame[columns.WELL_COLUMN].unique()) @@ -178,9 +210,12 @@ def parse_folder(folder: PathLike, quiet: bool = False) -> pandas.DataFrame: Will raise an ValueError, if no sensospot data could be found in the folder - folder: path of folder containing data files - quiet: skip sanity check, defaults to False - returns: pandas dataframe with parsed data + Args: + folder: path of folder containing data files + quiet: skip sanity check, defaults to False + + Returns: + a pandas data frame with parsed data """ folder_path = pathlib.Path(folder) file_list = find_csv_files(folder_path)