# SPDX-License-Identifier: GPL-3.0-only
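"""Command line tool for CSV Metadata Quality.

Reads a UTF-8 CSV of metadata, applies the configured checks and (optionally
unsafe) fixes to each field and each item, and writes the result to a new CSV.

Example invocation (the console script is typically installed as
csv-metadata-quality; paths are illustrative):

    $ csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u
"""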

import argparse
import os
import re
import signal
import sys
from datetime import timedelta

import pandas as pd
import requests_cache
from colorama import Fore

import csv_metadata_quality.check as check
import csv_metadata_quality.experimental as experimental
import csv_metadata_quality.fix as fix
from csv_metadata_quality.version import VERSION


def parse_args(argv):
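    """Build the argparse parser and return the parsed command line options
    (argparse reads them from sys.argv by default)."""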
    parser = argparse.ArgumentParser(description="Metadata quality checker and fixer.")
    parser.add_argument(
        "--agrovoc-fields",
        "-a",
        help="Comma-separated list of fields to validate against AGROVOC, for example: dcterms.subject,cg.coverage.country",
    )
    parser.add_argument(
        "--drop-invalid-agrovoc",
        "-d",
        help="After validating metadata values against AGROVOC, drop invalid values.",
        action="store_true",
    )
    parser.add_argument(
        "--experimental-checks",
        "-e",
        help="Enable experimental checks like language detection",
        action="store_true",
    )
    parser.add_argument(
        "--input-file",
        "-i",
        help="Path to input file. Must be a UTF-8 CSV.",
        required=True,
        type=argparse.FileType("r", encoding="UTF-8"),
    )
    parser.add_argument(
        "--output-file",
        "-o",
        help="Path to output file (always CSV).",
        required=True,
        type=argparse.FileType("w", encoding="UTF-8"),
    )
    parser.add_argument(
        "--unsafe-fixes", "-u", help="Perform unsafe fixes.", action="store_true"
    )
    parser.add_argument(
        "--version", "-V", action="version", version=f"CSV Metadata Quality v{VERSION}"
    )
    parser.add_argument(
        "--exclude-fields",
        "-x",
        help="Comma-separated list of fields to skip, for example: dc.contributor.author,dcterms.bibliographicCitation",
    )
    args = parser.parse_args()

    return args


def signal_handler(signal, frame):
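    """Exit with a non-zero status when the run is interrupted (SIGINT, ^C)."""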
    sys.exit(1)


def run(argv):
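    """Read the input CSV into a DataFrame, apply the field-level fixes and
    checks to each column and the item-level checks to each row, then write
    the result to the output CSV."""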
    args = parse_args(argv)

    # set the signal handler for SIGINT (^C)
    signal.signal(signal.SIGINT, signal_handler)

    # Read all fields as strings so dates don't get converted from 1998 to 1998.0
    df = pd.read_csv(args.input_file, dtype_backend="pyarrow", dtype="str")

    # Check if the user requested to skip any fields
    if args.exclude_fields:
        # Split the list of excluded fields on ',' into a list. Note that the
        # user should be careful to not include spaces here.
        exclude = args.exclude_fields.split(",")
    else:
        exclude = []

    # Enable a transparent request cache with a thirty-day expiry
    expire_after = timedelta(days=30)
    # Allow overriding the location of the requests cache, just in case we are
    # running in an environment where we can't write to the current working
    # directory (for example from csv-metadata-quality-web).
    REQUESTS_CACHE_DIR = os.environ.get("REQUESTS_CACHE_DIR", ".")
    requests_cache.install_cache(
        f"{REQUESTS_CACHE_DIR}/agrovoc-response-cache", expire_after=expire_after
    )

    # Prune old cache entries
    requests_cache.delete()

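    # Run the field-level fixes and checks on each column individually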
    for column in df.columns:
        if column in exclude:
            print(f"{Fore.YELLOW}Skipping {Fore.RESET}{column}")

            continue

        if args.unsafe_fixes:
            # Skip whitespace and newline fixes on abstracts and descriptions
            # because there are too many with legitimate multi-line metadata.
            match = re.match(r"^.*?(abstract|description).*$", column)
            if match is None:
                # Fix: whitespace
                df[column] = df[column].apply(fix.whitespace, field_name=column)

                # Fix: newlines
                df[column] = df[column].apply(fix.newlines, field_name=column)

        # Fix: missing space after comma. Only run on author and citation
        # fields for now, as this problem is mostly an issue in names.
        if args.unsafe_fixes:
            match = re.match(r"^.*?(author|[Cc]itation).*$", column)
            if match is not None:
                df[column] = df[column].apply(fix.comma_space, field_name=column)

        # Fix: perform Unicode normalization (NFC) to convert decomposed
        # characters into their canonical forms.
        if args.unsafe_fixes:
            df[column] = df[column].apply(fix.normalize_unicode, field_name=column)

        # Check: suspicious characters
        df[column].apply(check.suspicious_characters, field_name=column)

        # Fix: mojibake. If unsafe fixes are not enabled then we only check.
        if args.unsafe_fixes:
            df[column] = df[column].apply(fix.mojibake, field_name=column)
        else:
            df[column].apply(check.mojibake, field_name=column)

        # Fix: unnecessary Unicode
        df[column] = df[column].apply(fix.unnecessary_unicode)

        # Fix: invalid and unnecessary multi-value separators. Skip the title,
        # citation, and abstract fields because "|" can appear legitimately
        # there, for example to indicate something like a subtitle.
        match = re.match(r"^.*?(abstract|[Cc]itation|title).*$", column)
        if match is None:
            df[column] = df[column].apply(fix.separators, field_name=column)
            # Run whitespace fix again after fixing invalid separators
            df[column] = df[column].apply(fix.whitespace, field_name=column)

        # Fix: duplicate metadata values
        df[column] = df[column].apply(fix.duplicates, field_name=column)

        # Check: invalid AGROVOC subject and optionally drop them
        if args.agrovoc_fields:
            # Identify fields the user wants to validate against AGROVOC
            for field in args.agrovoc_fields.split(","):
                if column == field:
                    df[column] = df[column].apply(
                        check.agrovoc, field_name=column, drop=args.drop_invalid_agrovoc
                    )

        # Check: invalid language
        match = re.match(r"^.*?language.*$", column)
        if match is not None:
            df[column].apply(check.language)

        # Check: invalid ISSN
        match = re.match(r"^.*?issn.*$", column)
        if match is not None:
            df[column].apply(check.issn)

        # Check: invalid ISBN
        match = re.match(r"^.*?isbn.*$", column)
        if match is not None:
            df[column].apply(check.isbn)

        # Check: invalid date
        match = re.match(r"^.*?(date|dcterms\.issued).*$", column)
        if match is not None:
            df[column].apply(check.date, field_name=column)

        # Check: filename extension
        if column == "filename":
            df[column].apply(check.filename_extension)

        # Check: SPDX license identifier
        match = re.match(r"dcterms\.license.*$", column)
        if match is not None:
            df[column].apply(check.spdx_license_identifier)

    ### End individual column checks ###

    # Check: duplicate items
    # We extract just the title, type, and date issued columns to analyze
    try:
        duplicates_df = df.filter(
            regex=r"dcterms\.title|dc\.title|dcterms\.type|dc\.type|dcterms\.issued|dc\.date\.issued"
        )
        check.duplicate_items(duplicates_df)

        # Delete the temporary duplicates DataFrame
        del duplicates_df
    except IndexError:
        pass

    ##
    # Perform some checks on rows so we can consider items as a whole rather
    # than simply on a field-by-field basis. This allows us to check whether
    # the language used in the title and abstract matches the language
    # indicated in the language field, for example.
    #
    # This is slower and apparently frowned upon in the Pandas community
    # because it requires iterating over rows rather than using apply over
    # a column. For now it will have to do.
    ##

    # Transpose the DataFrame so we can consider each row as a column
    df_transposed = df.T

    # Remember, here a "column" is an item (previously a row). Perhaps I
    # should rename column in this for loop...
    for column in df_transposed.columns:
        # Check: citation DOI
        check.citation_doi(df_transposed[column], exclude)

        # Check: title in citation
        check.title_in_citation(df_transposed[column], exclude)

        if args.unsafe_fixes:
            # Fix: countries match regions
            df_transposed[column] = fix.countries_match_regions(
                df_transposed[column], exclude
            )
        else:
            # Check: countries match regions
            check.countries_match_regions(df_transposed[column], exclude)

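        # Check (experimental): language detection, for example whether the
        # language used in the title and abstract matches the item's language field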
        if args.experimental_checks:
            experimental.correct_language(df_transposed[column], exclude)

    # Transpose the DataFrame back before writing. This is probably wasteful to
    # do every time since we technically only need to do it if we've done the
    # countries/regions fix above, but I can't think of another way for now.
    df_transposed_back = df_transposed.T

    # Write
    df_transposed_back.to_csv(args.output_file, index=False)

    # Close the input and output files before exiting
    args.input_file.close()
    args.output_file.close()

    sys.exit(0)