# parse-timelogs-for-upload/pomodoro_to_harvest.py

172 lines
8.4 KiB
Python
Raw Normal View History

import pandas as pd
import numpy as np
import glob
import re
import sys
# Import our local settings management.
import settings
# When running interactively in a Python shell, sys.ps1 is defined; use that
# to turn on debug mode, which keeps copies of the dataframe at each stage.
if hasattr(sys, 'ps1'):
    import copy
    debug = True
else:
    debug = False
if settings.pomodoro_logfile():
    # This works for one file:
    timelog = pd.read_csv(settings.pomodoro_logfile())
else:
    # For multiple files: read every CSV in the configured log directory (the
    # path is expected to end in a path separator) and concatenate them.
    path = settings.pomodoro_logpath()
    all_files = glob.glob(path + "*.csv")
    li = []
    for filename in all_files:
        df = pd.read_csv(filename, index_col=None, header=0)
        li.append(df)
    timelog = pd.concat(li, axis=0, ignore_index=True)
if debug:
    imported = copy.deepcopy(timelog)
timelog.drop_duplicates(inplace=True)
if debug:
    nodupes = copy.deepcopy(timelog)
# Dump bad data. The real solution here is to get rid of the damned 'Cancel'
# button on the PomodoroPrompt dialog, but I don't know how to do that, so we
# need to drop the rows where the work task description is blank. Those come
# through as NaN (not a number), maybe because they are the last row of the
# spreadsheet. Either way, we cannot do anything with no data in the
# description, so drop such rows at the outset. We can allow no data in the
# 'intention', so define the three columns to check:
timelog = timelog.dropna(subset=['started', 'recorded', 'description'])
timelog = timelog.reset_index(drop=True)
if debug:
    dropna = copy.deepcopy(timelog)
# For debugging, keep originals around.
timelog["orig_desc"] = timelog["description"]
timelog["orig_started"] = timelog["started"]
timelog["orig_recorded"] = timelog["recorded"]
# Clean up description before we go to work on it.
timelog['description'] = timelog['description'].str.strip()
# Allow multiple entries to be put into one prompt by splitting with semicolon.
# TODO make this a flag since it's possible to use semicolons without meaning
# to make multiple task entries at once.
timelog["description"] = list(timelog["description"].str.split(";"))
timelog = timelog.explode("description").reset_index()
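# Illustrative example (hypothetical row): a description of
# "Standup; Review merge requests" becomes two rows after the split and
# explode, one with description "Standup" and one with " Review merge
# requests" (the stray whitespace gets stripped further down).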
if debug:
    mess = copy.deepcopy(timelog)
timelog["started"] = pd.to_datetime(timelog["started"], errors='coerce').dt.tz_convert("US/Eastern")
timelog["recorded"] = pd.to_datetime(timelog["recorded"]).dt.tz_convert("US/Eastern")
latest_recorded = settings.pomodoro_latest_recorded()
if latest_recorded:
    timelog = timelog[timelog.recorded > pd.to_datetime(latest_recorded)]
timelog["time"] = 30
# A pomodoro started before 3am Eastern time is considered to be a continuation
# of the day before, so we are, effectively, on West Coast time for determining
# the day we want to associate a time entry with. PomodoroPrompt saves as UTC.
timelog["date"] = timelog["started"].dt.tz_convert("US/Pacific").dt.date
timelog["day_of_week"] = pd.to_datetime(timelog["date"]).dt.day_name()
# If a project has been specified (task prefixed with a colon), then put the
# project in its own column.
timelog['project'] = np.where(timelog['description'].str.contains(': '),
                              timelog['description'].str.split(': ', n=1).str[0],
                              None)
timelog['description'] = np.where(timelog['description'].str.contains(': '),
                                  timelog['description'].str.split(': ', n=1).str[1],
                                  timelog['description'])
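# Illustrative example (hypothetical row): "Find It Cambridge: Fix search
# facets" yields project "Find It Cambridge" and description "Fix search
# facets"; a description with no ': ' separator keeps project as None.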
# Mid-work clean up of description and new project.
timelog['description'] = timelog['description'].str.strip()
timelog['project'] = timelog['project'].str.strip()
# If a multiplier has been provided (an asterisk and an integer at the end of a
# task), then multiply the time by it and remove it from the description.
# Ensure we're splitting on the same asterisk we found: Use the end of string
# signifier in the regular expression ($), and split from the right.
p = re.compile(r'\*\s*\d+$')
# On some systems, using np.where worked but others failed. Why it worked is
# unknown, but why it failed is because numpy where evaluates all parts, even
# the parts that will never get used because the where clause does not apply!
# This caused the chained string operations to fail when there was no string.
# timelog['tmp_multiplier'] = (np.where(timelog['description'].str.contains('\*\s*\d$'), timelog['description'].str.rsplit('*', 1).str[1].str.strip(), 1))
# timelog['description'] = (np.where(timelog['description'].str.contains('\*\s*\d$'), timelog['description'].str.rsplit('*', 1).str[0], timelog['description']))
timelog['tmp_multiplier'] = timelog['description'].apply(lambda x: x.rsplit('*', 1)[1].strip() if p.search(x) else 1)
timelog['description'] = timelog['description'].apply(lambda x: x.rsplit('*', 1)[0] if p.search(x) else x)
timelog["time"] = timelog["time"] * timelog['tmp_multiplier'].astype(int)
timelog.drop(columns=['tmp_multiplier'], inplace=True)
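# Illustrative example (hypothetical row): a description of "Sprint planning *3"
# is split into description "Sprint planning " (stripped just below) and a
# multiplier of 3, so its time becomes 30 * 3 = 90 minutes.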
# Clean up description again, after it has been sliced and diced.
timelog['description'] = timelog['description'].str.strip()
# Replace irregular-but-known project names with ones timetracking tools use.
replacement_project_names = {
"Boston Modern Orchestra Project": ["BMOP", "BMOP.org"],
"CRLA.org upgrade": ["CRLA", "CRLA upgrade"],
2021-06-01 15:16:06 +00:00
"Contrib": ["Contributing", "Agaric contrib", "Agaric contributions"],
"Drutopia": ["Drutopia improvements", "Drutopia overhead"],
2021-06-01 15:16:06 +00:00
"EC Connect": ["eccconectcolorado.org", "Denver Early Childhood", "ECconnect", "ECconnectColorado"],
"encuentro 5 sites": ["e5", "Encuentro"],
2021-05-28 18:38:09 +00:00
"Family & Home": ["Family and Home", "Family home"],
2021-04-28 15:14:39 +00:00
"Find It Cambridge": ["Find It", "FIC", "Cambridge"],
"GEO Support": ["GEO", "GEO.coop", "Grassroots Economic Organizing"],
2021-06-01 15:16:06 +00:00
"Internal": ["Agaric", "Agaric internal"],
2021-06-01 15:06:02 +00:00
"Leads": ["Lead", "Agaric leads", "Lead followups"],
2021-06-01 15:16:06 +00:00
"Internal: Personal Learning": ["Learning", "Personal learning"],
"MASS Continuous Improvement": ["MASS Design Group", "MASS", "MASS Design"],
"NICHQ Data Upgrade": ["NICHQ Data"],
"NICHQ Support": ["NICHQ", "NICHQ support"],
2021-06-01 20:39:49 +00:00
"NICHQ FL CMS LAN": ["FL CMS LAN", "flcmslan", "NICHQ FLCMSLAN"],
2021-06-01 15:06:02 +00:00
"Near North camp": ["Near North Camp", "Near North defense", "Encampment support", "Camp support", "NN camp defense", "NN camp", "NN defense", "Near North camp defense"],
2021-06-01 15:16:06 +00:00
"Internal: Network Engagement": ["Network Engagement", "network engagement", "Network engagment", "Social media", "Network building", "Agaric network engagement"],
2021-06-01 20:39:49 +00:00
"Personal": ["Personal/external", "Personal / external", "External"],
"SCDTDP Collaboratory Data Site System Security": ["SCDTDP", "NICHQ SCDTDP", "NICHQ security"],
2021-06-01 15:06:02 +00:00
"Teachers with GUTS": ["TWIG", "GUTS"],
"The Propaganda Site": ["TPS", "Propaganda Site", "The Propganda Site", "Murat & Clay"],
2021-04-28 15:14:39 +00:00
}
for preferred, alternatives in replacement_project_names.items():
    # We compare lower-cased versions of all the alternatives, and append the
    # preferred name to the list so it matches itself too, but note that what
    # we write back as the project keeps the preferred capitalization.
    alternatives.append(preferred)
    alternatives = [item.lower() for item in alternatives]
    timelog.loc[timelog.project.str.lower().isin(alternatives), "project"] = preferred
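# Illustrative example (hypothetical row): a project logged as "bmop.org" is
# lower-cased, matches the "BMOP.org" alternative, and is rewritten to
# "Boston Modern Orchestra Project".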
# If a compound project was specified, break that out into a sub-project (in
# Harvest we use Task, which is really a task type, for this).
timelog['subproject'] = np.where(timelog['project'].str.contains(': '),
                                 timelog['project'].str.split(': ', n=1).str[1],
                                 None)
timelog['project'] = np.where(timelog['project'].str.contains(': '),
                              timelog['project'].str.split(': ', n=1).str[0],
                              timelog['project'])
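# Illustrative example (hypothetical row): a project of "Internal: Personal
# Learning" becomes project "Internal" with subproject "Personal Learning";
# a project without ': ' is left as-is and its subproject stays empty.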
# Condense duplicate entries by date, summing the minutes spent, and listing
# the first started and last recorded times for each task.
# The fillna is essential or we drop entries with blank ('None') projects.
tl = timelog.groupby(["date", timelog.project.fillna(""), "description"]).agg({"time": 'sum', "started": 'min', "recorded": 'max'}).reset_index()
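# Illustrative example (hypothetical rows): two 30-minute "Fix search facets"
# entries for the same project on the same date collapse into a single
# 60-minute row, keeping the earliest started and the latest recorded times.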
# We're doing the final conversion to Harvest as a separate step because we
# want to factor out all of the above non-Harvest-specific logic.
latest = tl.recorded.max()
# Filter out any blank projects and any projects we know are not in Harvest.
# We also do the opposite to get a CSV of the excluded items.
non_harvest_list = ["", "Personal", "Near North camp"]
other = tl[tl.project.isin(non_harvest_list)]
harvest = tl[~tl.project.isin(non_harvest_list)]
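# Illustrative example: a row with project "Personal" lands in `other`
# (written to not-harvest.csv below), while a row with project
# "Find It Cambridge" lands in `harvest` (harvest-timesheets.csv).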
if not debug:
    harvest.to_csv('harvest-timesheets.csv', index=False)
    other.to_csv('not-harvest.csv', index=False)
    settings.pomodoro_latest_recorded(latest)
else:
    print("We do not write harvest-timesheets.csv or not-harvest.csv, nor update the latest recorded setting, when run interactively in the python shell.")