Add regex for narrowing to issue number
Add columns for issue url and title
Add loading forgejo_api_token from .env
parent cfcd09a567
commit c3b5602706
1 changed file with 24 additions and 10 deletions
@@ -1,10 +1,16 @@
 import os
 import csv
 import re
+from dotenv import load_dotenv
+
+load_dotenv()
+
+FOREGEJO_API_TOKEN = os.getenv("FORGEJO_API_TOKEN")
+forgejo_issue_api_string = "/repos/{owner}/{repo}/issues/{index}"

 # {'harvest_project': ('owner', 'repo')}
 projects = {
-    'harvest_project': (owner, repo),
+    'MASS Continuous Improvement': ('mass', 'mass'),
 }

 def test_parse_notes_section():
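As a quick orientation on the new configuration lines, a minimal sketch of how the token and the endpoint template fit together; the .env contents and the formatted path below are illustrative assumptions, not part of the committed code.

# Illustrative only: a .env file next to the script might contain
#   FORGEJO_API_TOKEN=abc123...
# load_dotenv() reads it so os.getenv() can pick the value up.
print(FOREGEJO_API_TOKEN)  # the token string, or None if the variable is unset

# The endpoint template can be filled in per issue, e.g. for the
# 'MASS Continuous Improvement' project mapped to ('mass', 'mass'):
print(forgejo_issue_api_string.format(owner="mass", repo="mass", index=42))
# -> /repos/mass/mass/issues/42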
@@ -15,14 +21,13 @@ def test_parse_notes_section():


 def parse_notes_section(notes):
-    print("NOTES: ", notes)
     regex_pattern = r"[Ii]ssue\s*(?:#)?\d+|#\d+"
-    matches = re.findall(regex_pattern, notes)
-    if matches:
-        print("MATCHES: ", matches)
-        return matches
+    matches = re.findall(regex_pattern, notes)[:3]
+    issue_numbers = []
+    for match in matches:
+        match = re.search(r"\d+", match).group()
+        issue_numbers.append(match)
+    return issue_numbers


 def parse_harvest_csv(file=None):
     if file is None:
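To illustrate the narrowing behaviour added in this hunk, a small usage sketch follows; the sample note text is invented for demonstration, and the expected output assumes the new parse_notes_section defined above.

# Hypothetical input: a Harvest "Notes" field mentioning issues in several styles.
notes = "Worked on issue #12, reviewed Issue 7, closed #305, touched #9"
print(parse_notes_section(notes))
# -> ['12', '7', '305']
# findall picks up "issue #12", "Issue 7", "#305", and "#9"; the [:3] slice keeps
# the first three, and re.search(r"\d+", ...) strips each match down to its number.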
@@ -37,16 +42,25 @@ def parse_harvest_csv(file=None):
     rows = list(csv_reader)
     original_fieldnames = csv_reader.fieldnames

-    issue_fields = ['First Issue', 'Second Issue', 'Third Issue']
+    issue_fields = [
+        "First Issue Title",
+        "First Issue URL",
+        "Second Issue Title",
+        "Second Issue URL",
+        "Third Issue Title",
+        "Third Issue URL"
+    ]
     modified_fieldnames = original_fieldnames + issue_fields

     with open('modified_csv.csv', 'w', newline='') as csvfile:
         writer = csv.DictWriter(csvfile, fieldnames = modified_fieldnames)
         writer.writeheader()
+        row_count = 0
         for row in rows:
             issues = parse_notes_section(row['Notes'])
             issues_dict = dict(zip(issue_fields, issues))
-            writer.writerow(issues_dict)
+            row.update(issues_dict)
+            writer.writerow(row)


 def test_program():
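The new title/URL columns and the forgejo_issue_api_string template suggest that the extracted issue numbers will eventually be resolved to real titles and URLs. A rough sketch of how that lookup could work against a Forgejo instance is below; the base URL, the requests dependency, and the fetch_issue helper are assumptions for illustration and are not part of this commit, which still writes the bare issue numbers into the new columns via dict(zip(issue_fields, issues)).

import requests  # assumed dependency, not added by this commit

FORGEJO_BASE_URL = "https://codeberg.org/api/v1"  # hypothetical instance

def fetch_issue(owner, repo, index):
    # Hypothetical helper: resolve an issue number to (title, url) using the
    # endpoint template added in this commit. Assumes the standard Forgejo
    # issue response fields "title" and "html_url".
    path = forgejo_issue_api_string.format(owner=owner, repo=repo, index=index)
    response = requests.get(
        FORGEJO_BASE_URL + path,
        headers={"Authorization": f"token {FOREGEJO_API_TOKEN}"},
    )
    response.raise_for_status()
    issue = response.json()
    return issue["title"], issue["html_url"]

As committed, zip pairs the up-to-three issue numbers with the six new fields in order and stops at the shorter list, so only the first three of the new columns receive values; a lookup like the sketch above would be needed to fill the title/URL pairs as the column names intend.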