Browse Source

changed 'split_data_frame' to 'split'

xmlparsing
Holger Frey 4 years ago
parent
commit
dc8e7d5273
  1. 4
      README.md
  2. 2
      sensospot_data/__init__.py
  3. 4
      sensospot_data/dynamic_range.py
  4. 2
      sensospot_data/utils.py
  5. 2
      tests/test_sensovation_data.py
  6. 6
      tests/test_utils.py

4
README.md

@@ -24,7 +24,7 @@ other useful functions for working with the data.
enhanced_data = sensospot_data.apply_exposure_map(raw_data, exposure_map)
# split the measurement according to channels
channels = sensospot_data.split_data_frame(enhanced_data "Exposure.Channel")
channels = sensospot_data.split(enhanced_data, "Exposure.Channel")
# merge the two cy5 measurements together, creating an extended dynamic range
cy5_xdr = sensospot_data.create_xdr(channels["cy5"], normalized_time=25)
@@ -40,7 +40,7 @@ from .parser import parse_file, parse_folder # noqa: F401
- **parse_file(path_to_csv_file)**
Parses the csv file into a pandas data frame and will add some additional
meta data from the file name. It is also used internally by `parse_folder()`
- **split_data_frame(data_frame, column)**
- **split(data_frame, column)**
Splits a data frame based on the unique values of a column. Will return a
dict, with the unique values as keys and the corresponding data frame as
value

2
sensospot_data/__init__.py

@@ -11,9 +11,9 @@ from pathlib import Path
import click
from .utils import ( # noqa: F401
split,
aggregate,
add_aggregate,
split_data_frame,
apply_exposure_map,
)
from .parser import parse_file, parse_folder # noqa: F401

4
sensospot_data/dynamic_range.py

@@ -1,6 +1,6 @@
from pandas.api.types import is_numeric_dtype
from .utils import split_data_frame
from .utils import split
from .columns import (
RAW_DATA_POS_ID,
CALC_SPOT_OVERFLOW,
@@ -42,7 +42,7 @@ def _calc_overflow_info(data_frame, column=RAW_DATA_SPOT_MEAN, limit=0.5):
def _reduce_overflow(data_frame):
""" the heavy lifting for creating an extended dynamic range """
split_frames = split_data_frame(data_frame, SETTINGS_EXPOSURE_TIME)
split_frames = split(data_frame, SETTINGS_EXPOSURE_TIME)
# get the exposure times, longest first
exposure_times = sorted(split_frames.keys(), reverse=True)

2
sensospot_data/utils.py

@@ -20,7 +20,7 @@ DEFAULT_AGGREGATION_INDEX = [
]
def split_data_frame(data_frame, column):
def split(data_frame, column):
""" splits a data frame on unique column values """
values = data_frame[column].unique()
masks = {value: (data_frame[column] == value) for value in values}

2
tests/test_sensovation_data.py

@@ -5,11 +5,11 @@ def test_import_api():
from sensospot_data import ExposureInfo # noqa: F401
from sensospot_data import run # noqa: F401
from sensospot_data import blend # noqa: F401
from sensospot_data import split # noqa: F401
from sensospot_data import aggregate # noqa: F401
from sensospot_data import create_xdr # noqa: F401
from sensospot_data import parse_file # noqa: F401
from sensospot_data import parse_folder # noqa: F401
from sensospot_data import add_aggregate # noqa: F401
from sensospot_data import normalize_values # noqa: F401
from sensospot_data import split_data_frame # noqa: F401
from sensospot_data import apply_exposure_map # noqa: F401

6
tests/test_utils.py

@@ -5,10 +5,10 @@ import pytest
ExposureSetting = namedtuple("ExposureSetting", ["channel", "time"])
def test_split_data_frame(data_frame_with_params):
from sensospot_data.utils import split_data_frame
def test_split(data_frame_with_params):
from sensospot_data.utils import split
result = split_data_frame(data_frame_with_params, "Well.Row")
result = split(data_frame_with_params, "Well.Row")
assert set(result.keys()) == set("ABC")
for key, value_df in result.items():

Loading…
Cancel
Save