Mirror of https://github.com/ilri/csv-metadata-quality.git (synced 2024-12-22 12:12:18 +01:00)

Format with black

parent b375f0e895
commit d97dcd19db
@@ -6,5 +6,5 @@ def main():
     app.run(argv)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
@@ -9,13 +9,37 @@ import sys


 def parse_args(argv):
-    parser = argparse.ArgumentParser(description='Metadata quality checker and fixer.')
-    parser.add_argument('--agrovoc-fields', '-a', help='Comma-separated list of fields to validate against AGROVOC, for example: dc.subject,cg.coverage.country')
-    parser.add_argument('--input-file', '-i', help='Path to input file. Can be UTF-8 CSV or Excel XLSX.', required=True, type=argparse.FileType('r', encoding='UTF-8'))
-    parser.add_argument('--output-file', '-o', help='Path to output file (always CSV).', required=True, type=argparse.FileType('w', encoding='UTF-8'))
-    parser.add_argument('--unsafe-fixes', '-u', help='Perform unsafe fixes.', action='store_true')
-    parser.add_argument('--version', '-V', action='version', version=f'CSV Metadata Quality v{VERSION}')
-    parser.add_argument('--exclude-fields', '-x', help='Comma-separated list of fields to skip, for example: dc.contributor.author,dc.identifier.citation')
+    parser = argparse.ArgumentParser(description="Metadata quality checker and fixer.")
+    parser.add_argument(
+        "--agrovoc-fields",
+        "-a",
+        help="Comma-separated list of fields to validate against AGROVOC, for example: dc.subject,cg.coverage.country",
+    )
+    parser.add_argument(
+        "--input-file",
+        "-i",
+        help="Path to input file. Can be UTF-8 CSV or Excel XLSX.",
+        required=True,
+        type=argparse.FileType("r", encoding="UTF-8"),
+    )
+    parser.add_argument(
+        "--output-file",
+        "-o",
+        help="Path to output file (always CSV).",
+        required=True,
+        type=argparse.FileType("w", encoding="UTF-8"),
+    )
+    parser.add_argument(
+        "--unsafe-fixes", "-u", help="Perform unsafe fixes.", action="store_true"
+    )
+    parser.add_argument(
+        "--version", "-V", action="version", version=f"CSV Metadata Quality v{VERSION}"
+    )
+    parser.add_argument(
+        "--exclude-fields",
+        "-x",
+        help="Comma-separated list of fields to skip, for example: dc.contributor.author,dc.identifier.citation",
+    )
     args = parser.parse_args()

     return args
@@ -40,11 +64,11 @@ def run(argv):
             skip = False
             # Split the list of excludes on ',' so we can test exact matches
             # rather than fuzzy matches with regexes or "if word in string"
-            for exclude in args.exclude_fields.split(','):
+            for exclude in args.exclude_fields.split(","):
                 if column == exclude and skip is False:
                     skip = True
             if skip:
-                print(f'Skipping {column}')
+                print(f"Skipping {column}")

                 continue

@@ -58,7 +82,7 @@ def run(argv):
         # Fix: missing space after comma. Only run on author and citation
         # fields for now, as this problem is mostly an issue in names.
         if args.unsafe_fixes:
-            match = re.match(r'^.*?(author|citation).*$', column)
+            match = re.match(r"^.*?(author|citation).*$", column)
             if match is not None:
                 df[column] = df[column].apply(fix.comma_space, field_name=column)

@@ -83,32 +107,32 @@ def run(argv):
         # Check: invalid AGROVOC subject
         if args.agrovoc_fields:
             # Identify fields the user wants to validate against AGROVOC
-            for field in args.agrovoc_fields.split(','):
+            for field in args.agrovoc_fields.split(","):
                 if column == field:
                     df[column] = df[column].apply(check.agrovoc, field_name=column)

         # Check: invalid language
-        match = re.match(r'^.*?language.*$', column)
+        match = re.match(r"^.*?language.*$", column)
         if match is not None:
             df[column] = df[column].apply(check.language)

         # Check: invalid ISSN
-        match = re.match(r'^.*?issn.*$', column)
+        match = re.match(r"^.*?issn.*$", column)
         if match is not None:
             df[column] = df[column].apply(check.issn)

         # Check: invalid ISBN
-        match = re.match(r'^.*?isbn.*$', column)
+        match = re.match(r"^.*?isbn.*$", column)
         if match is not None:
             df[column] = df[column].apply(check.isbn)

         # Check: invalid date
-        match = re.match(r'^.*?date.*$', column)
+        match = re.match(r"^.*?date.*$", column)
         if match is not None:
             df[column] = df[column].apply(check.date, field_name=column)

         # Check: filename extension
-        if column == 'filename':
+        if column == "filename":
             df[column] = df[column].apply(check.filename_extension)

     # Write
@@ -18,10 +18,10 @@ def issn(field):
         return

     # Try to split multi-value field on "||" separator
-    for value in field.split('||'):
+    for value in field.split("||"):

         if not issn.is_valid(value):
-            print(f'Invalid ISSN: {value}')
+            print(f"Invalid ISSN: {value}")

     return field

@@ -43,10 +43,10 @@ def isbn(field):
         return

     # Try to split multi-value field on "||" separator
-    for value in field.split('||'):
+    for value in field.split("||"):

         if not isbn.is_valid(value):
-            print(f'Invalid ISBN: {value}')
+            print(f"Invalid ISBN: {value}")

     return field

@@ -64,13 +64,13 @@ def separators(field):
         return

     # Try to split multi-value field on "||" separator
-    for value in field.split('||'):
+    for value in field.split("||"):

         # After splitting, see if there are any remaining "|" characters
-        match = re.findall(r'^.*?\|.*$', value)
+        match = re.findall(r"^.*?\|.*$", value)

         if match:
-            print(f'Invalid multi-value separator: {field}')
+            print(f"Invalid multi-value separator: {field}")

     return field

@@ -88,22 +88,22 @@ def date(field, field_name):
     from datetime import datetime

     if pd.isna(field):
-        print(f'Missing date ({field_name}).')
+        print(f"Missing date ({field_name}).")

         return

     # Try to split multi-value field on "||" separator
-    multiple_dates = field.split('||')
+    multiple_dates = field.split("||")

     # We don't allow multi-value date fields
     if len(multiple_dates) > 1:
-        print(f'Multiple dates not allowed ({field_name}): {field}')
+        print(f"Multiple dates not allowed ({field_name}): {field}")

         return field

     try:
         # Check if date is valid YYYY format
-        datetime.strptime(field, '%Y')
+        datetime.strptime(field, "%Y")

         return field
     except ValueError:
@@ -111,7 +111,7 @@ def date(field, field_name):

     try:
         # Check if date is valid YYYY-MM format
-        datetime.strptime(field, '%Y-%m')
+        datetime.strptime(field, "%Y-%m")

         return field
     except ValueError:
@@ -119,11 +119,11 @@ def date(field, field_name):

     try:
         # Check if date is valid YYYY-MM-DD format
-        datetime.strptime(field, '%Y-%m-%d')
+        datetime.strptime(field, "%Y-%m-%d")

         return field
     except ValueError:
-        print(f'Invalid date ({field_name}): {field}')
+        print(f"Invalid date ({field_name}): {field}")

         return field

@@ -140,7 +140,7 @@ def suspicious_characters(field, field_name):
         return

     # List of suspicious characters, for example: ´ˆ~`
-    suspicious_characters = ['\u00B4', '\u02C6', '\u007E', '\u0060']
+    suspicious_characters = ["\u00B4", "\u02C6", "\u007E", "\u0060"]

     for character in suspicious_characters:
         # Find the position of the suspicious character in the string
@@ -156,8 +156,10 @@ def suspicious_characters(field, field_name):
             # character and spanning enough of the rest to give a preview,
             # but not too much to cause the line to break in terminals with
            # a default of 80 characters width.
-            suspicious_character_msg = f'Suspicious character ({field_name}): {field_subset}'
-            print(f'{suspicious_character_msg:1.80}')
+            suspicious_character_msg = (
+                f"Suspicious character ({field_name}): {field_subset}"
+            )
+            print(f"{suspicious_character_msg:1.80}")

     return field

@@ -177,22 +179,22 @@ def language(field):
     # need to handle "Other" values here...

     # Try to split multi-value field on "||" separator
-    for value in field.split('||'):
+    for value in field.split("||"):

         # After splitting, check if language value is 2 or 3 characters so we
         # can check it against ISO 639-2 or ISO 639-3 accordingly.
         if len(value) == 2:
             if not languages.get(alpha_2=value):
-                print(f'Invalid ISO 639-2 language: {value}')
+                print(f"Invalid ISO 639-2 language: {value}")

                 pass
         elif len(value) == 3:
             if not languages.get(alpha_3=value):
-                print(f'Invalid ISO 639-3 language: {value}')
+                print(f"Invalid ISO 639-3 language: {value}")

                 pass
         else:
-            print(f'Invalid language: {value}')
+            print(f"Invalid language: {value}")

     return field

@@ -220,12 +222,16 @@ def agrovoc(field, field_name):
         return

     # Try to split multi-value field on "||" separator
-    for value in field.split('||'):
-        request_url = f'http://agrovoc.uniroma2.it/agrovoc/rest/v1/agrovoc/search?query={value}'
+    for value in field.split("||"):
+        request_url = (
+            f"http://agrovoc.uniroma2.it/agrovoc/rest/v1/agrovoc/search?query={value}"
+        )

         # enable transparent request cache with thirty days expiry
         expire_after = timedelta(days=30)
-        requests_cache.install_cache('agrovoc-response-cache', expire_after=expire_after)
+        requests_cache.install_cache(
+            "agrovoc-response-cache", expire_after=expire_after
+        )

         request = requests.get(request_url)

@@ -236,8 +242,8 @@ def agrovoc(field, field_name):
             data = request.json()

             # check if there are any results
-            if len(data['results']) == 0:
-                print(f'Invalid AGROVOC ({field_name}): {value}')
+            if len(data["results"]) == 0:
+                print(f"Invalid AGROVOC ({field_name}): {value}")

     return field

@@ -260,10 +266,18 @@ def filename_extension(field):
         return

     # Try to split multi-value field on "||" separator
-    values = field.split('||')
+    values = field.split("||")

     # List of common filename extentions
-    common_filename_extensions = ['.pdf', '.doc', '.docx', '.ppt', '.pptx', '.xls', '.xlsx']
+    common_filename_extensions = [
+        ".pdf",
+        ".doc",
+        ".docx",
+        ".ppt",
+        ".pptx",
+        ".xls",
+        ".xlsx",
+    ]

     # Iterate over all values
     for value in values:
|
|||||||
|
|
||||||
for filename_extension in common_filename_extensions:
|
for filename_extension in common_filename_extensions:
|
||||||
# Check for extension at the end of the filename
|
# Check for extension at the end of the filename
|
||||||
pattern = re.escape(filename_extension) + r'$'
|
pattern = re.escape(filename_extension) + r"$"
|
||||||
match = re.search(pattern, value, re.IGNORECASE)
|
match = re.search(pattern, value, re.IGNORECASE)
|
||||||
|
|
||||||
if match is not None:
|
if match is not None:
|
||||||
@@ -282,6 +296,6 @@ def filename_extension(field):
                 break

         if filename_extension_match is False:
-            print(f'Filename with uncommon extension: {value}')
+            print(f"Filename with uncommon extension: {value}")

     return field
@@ -16,23 +16,23 @@ def whitespace(field):
     values = list()

     # Try to split multi-value field on "||" separator
-    for value in field.split('||'):
+    for value in field.split("||"):
         # Strip leading and trailing whitespace
         value = value.strip()

         # Replace excessive whitespace (>2) with one space
-        pattern = re.compile(r'\s{2,}')
+        pattern = re.compile(r"\s{2,}")
         match = re.findall(pattern, value)

         if match:
-            print(f'Excessive whitespace: {value}')
-            value = re.sub(pattern, ' ', value)
+            print(f"Excessive whitespace: {value}")
+            value = re.sub(pattern, " ", value)

         # Save cleaned value
         values.append(value)

     # Create a new field consisting of all values joined with "||"
-    new_field = '||'.join(values)
+    new_field = "||".join(values)

     return new_field

@@ -48,21 +48,21 @@ def separators(field):
     values = list()

     # Try to split multi-value field on "||" separator
-    for value in field.split('||'):
+    for value in field.split("||"):
         # After splitting, see if there are any remaining "|" characters
-        pattern = re.compile(r'\|')
+        pattern = re.compile(r"\|")
         match = re.findall(pattern, value)

         if match:
-            print(f'Fixing invalid multi-value separator: {value}')
+            print(f"Fixing invalid multi-value separator: {value}")

-            value = re.sub(pattern, '||', value)
+            value = re.sub(pattern, "||", value)

         # Save cleaned value
         values.append(value)

     # Create a new field consisting of all values joined with "||"
-    new_field = '||'.join(values)
+    new_field = "||".join(values)

     return new_field

@@ -86,36 +86,36 @@ def unnecessary_unicode(field):
         return

     # Check for zero-width space characters (U+200B)
-    pattern = re.compile(r'\u200B')
+    pattern = re.compile(r"\u200B")
     match = re.findall(pattern, field)

     if match:
-        print(f'Removing unnecessary Unicode (U+200B): {field}')
-        field = re.sub(pattern, '', field)
+        print(f"Removing unnecessary Unicode (U+200B): {field}")
+        field = re.sub(pattern, "", field)

     # Check for replacement characters (U+FFFD)
-    pattern = re.compile(r'\uFFFD')
+    pattern = re.compile(r"\uFFFD")
     match = re.findall(pattern, field)

     if match:
-        print(f'Removing unnecessary Unicode (U+FFFD): {field}')
-        field = re.sub(pattern, '', field)
+        print(f"Removing unnecessary Unicode (U+FFFD): {field}")
+        field = re.sub(pattern, "", field)

     # Check for no-break spaces (U+00A0)
-    pattern = re.compile(r'\u00A0')
+    pattern = re.compile(r"\u00A0")
     match = re.findall(pattern, field)

     if match:
-        print(f'Removing unnecessary Unicode (U+00A0): {field}')
-        field = re.sub(pattern, '', field)
+        print(f"Removing unnecessary Unicode (U+00A0): {field}")
+        field = re.sub(pattern, "", field)

     # Check for soft hyphens (U+00AD), sometimes preceeded with a normal hyphen
-    pattern = re.compile(r'\u002D*?\u00AD')
+    pattern = re.compile(r"\u002D*?\u00AD")
     match = re.findall(pattern, field)

     if match:
-        print(f'Replacing unnecessary Unicode (U+00AD): {field}')
-        field = re.sub(pattern, '-', field)
+        print(f"Replacing unnecessary Unicode (U+00AD): {field}")
+        field = re.sub(pattern, "-", field)

     return field

@@ -128,7 +128,7 @@ def duplicates(field):
         return

     # Try to split multi-value field on "||" separator
-    values = field.split('||')
+    values = field.split("||")

     # Initialize an empty list to hold the de-duplicated values
     new_values = list()
@@ -139,10 +139,10 @@ def duplicates(field):
         if value not in new_values:
             new_values.append(value)
         else:
-            print(f'Dropping duplicate value: {value}')
+            print(f"Dropping duplicate value: {value}")

     # Create a new field consisting of all values joined with "||"
-    new_field = '||'.join(new_values)
+    new_field = "||".join(new_values)

     return new_field

@@ -169,11 +169,11 @@ def newlines(field):
         return

     # Check for Unix line feed (LF)
-    match = re.findall(r'\n', field)
+    match = re.findall(r"\n", field)

     if match:
-        print(f'Removing newline: {field}')
-        field = field.replace('\n', '')
+        print(f"Removing newline: {field}")
+        field = field.replace("\n", "")

     return field

@@ -193,10 +193,10 @@ def comma_space(field, field_name):
         return

     # Check for comma followed by a word character
-    match = re.findall(r',\w', field)
+    match = re.findall(r",\w", field)

     if match:
-        print(f'Adding space after comma ({field_name}): {field}')
-        field = re.sub(r',(\w)', r', \1', field)
+        print(f"Adding space after comma ({field_name}): {field}")
+        field = re.sub(r",(\w)", r", \1", field)

     return field
@@ -1 +1 @@
-VERSION = '0.2.2'
+VERSION = "0.2.2"
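
For orientation, the argparse options touched in this diff suggest an invocation roughly like the sketch below. It is a minimal sketch, not part of the commit: the csv_metadata_quality.app import path and the sample file names are assumptions, and it simply mirrors the app.run(argv) call visible in the first hunk.

# Minimal usage sketch based on the options defined in the diff above.
# Assumption: the package exposes app.run(argv) and is importable as
# csv_metadata_quality.app; the file names here are hypothetical.
from csv_metadata_quality import app

argv = [
    "--input-file", "input.csv",        # required; UTF-8 CSV or Excel XLSX
    "--output-file", "cleaned.csv",     # required; output is always CSV
    "--unsafe-fixes",                   # opt in to fixes like comma-space in names
    "--agrovoc-fields", "dc.subject,cg.coverage.country",
    "--exclude-fields", "dc.identifier.citation",
]

app.run(argv)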