Mirror of https://github.com/ilri/csv-metadata-quality.git, synced 2025-05-09 22:56:01 +02:00

1 Commit

Commit 0d719cf42a: .drone.yml: add Python 3.11 (2022-10-25 10:47:16 +03:00)
All checks were successful: continuous-integration/drone/push build is passing
26 changed files with 1109 additions and 10124 deletions

.drone.yml

@@ -9,24 +9,24 @@ steps:
commands:
- id
- python -V
- apt update && apt install -y gcc g++ libicu-dev pkg-config git
- python -m pip install poetry
- poetry install
- poetry run pytest
- apt update && apt install -y gcc g++ libicu-dev pkg-config
- pip install -r requirements-dev.txt
- pytest
- python setup.py install
# Basic test
- poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv
- csv-metadata-quality -i data/test.csv -o /tmp/test.csv
# Basic test with unsafe fixes
- poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u
- csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u
# Geography test
- poetry run csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv
- csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv
# Geography test with unsafe fixes
- poetry run csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv -u
- csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv -u
# Test with experimental checks
- poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e
- csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e
# Test with AGROVOC validation
- poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject
- csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject
# Test with AGROVOC validation (and dropping invalid)
- poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject -d
- csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject -d
---
kind: pipeline
@@ -39,24 +39,24 @@ steps:
commands:
- id
- python -V
- apt update && apt install -y gcc g++ libicu-dev pkg-config git
- python -m pip install poetry
- poetry install
- poetry run pytest
- apt update && apt install -y gcc g++ libicu-dev pkg-config
- pip install -r requirements-dev.txt
- pytest
- python setup.py install
# Basic test
- poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv
- csv-metadata-quality -i data/test.csv -o /tmp/test.csv
# Basic test with unsafe fixes
- poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u
- csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u
# Geography test
- poetry run csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv
- csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv
# Geography test with unsafe fixes
- poetry run csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv -u
- csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv -u
# Test with experimental checks
- poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e
- csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e
# Test with AGROVOC validation
- poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject
- csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject
# Test with AGROVOC validation (and dropping invalid)
- poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject -d
- csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject -d
---
kind: pipeline
@@ -69,23 +69,53 @@ steps:
commands:
- id
- python -V
- apt update && apt install -y gcc g++ libicu-dev pkg-config git
- python -m pip install poetry
- poetry install
- poetry run pytest
- apt update && apt install -y gcc g++ libicu-dev pkg-config
- pip install -r requirements-dev.txt
- pytest
- python setup.py install
# Basic test
- poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv
- csv-metadata-quality -i data/test.csv -o /tmp/test.csv
# Basic test with unsafe fixes
- poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u
- csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u
# Geography test
- poetry run csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv
- csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv
# Geography test with unsafe fixes
- poetry run csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv -u
- csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv -u
# Test with experimental checks
- poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e
- csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e
# Test with AGROVOC validation
- poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject
- csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject
# Test with AGROVOC validation (and dropping invalid)
- poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject -d
- csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject -d
---
kind: pipeline
type: docker
name: python38
steps:
- name: test
image: python:3.8-slim
commands:
- id
- python -V
- apt update && apt install -y gcc g++ libicu-dev pkg-config
- pip install -r requirements-dev.txt
- pytest
- python setup.py install
# Basic test
- csv-metadata-quality -i data/test.csv -o /tmp/test.csv
# Basic test with unsafe fixes
- csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u
# Geography test
- csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv
# Geography test with unsafe fixes
- csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv -u
# Test with experimental checks
- csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e
# Test with AGROVOC validation
- csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject
# Test with AGROVOC validation (and dropping invalid)
- csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject -d
# vim: ts=2 sw=2 et

.github/workflows/python-app.yml

@@ -15,27 +15,37 @@ jobs:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- name: Install rye
uses: eifinger/setup-rye@v4
- uses: actions/checkout@v3
- name: Set up Python 3.10
uses: actions/setup-python@v4
with:
version: 'latest'
- run: rye sync
- name: Lint
python-version: '3.10'
cache: 'pip'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install flake8 pytest
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
if [ -f requirements-dev.txt ]; then pip install -r requirements-dev.txt; fi
- name: Lint with flake8
run: |
# stop the build if there are Python syntax errors or undefined names
rye lint
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Test with pytest
run: rye test
run: |
pytest
- name: Test CLI
run: |
python setup.py install
# Basic test
rye run csv-metadata-quality -i data/test.csv -o /tmp/test.csv
csv-metadata-quality -i data/test.csv -o /tmp/test.csv
# Test with unsafe fixes
rye run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u
csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u
# Test with experimental checks
rye run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e
csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e
# Test with AGROVOC validation
rye run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject
csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject
# Test with AGROVOC validation (and dropping invalid)
rye run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject -d
csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject -d

.python-version

@@ -1 +0,0 @@
3.12

CHANGELOG.md

@@ -4,44 +4,7 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## Unreleased
### Added
- Ability to normalize DOIs to https://doi.org URI format
### Fixed
- Fixed regex so we don't run the invalid multi-value separator fix on
`dcterms.bibliographicCitation` fields
- Fixed regex so we run the comma space fix on `dcterms.bibliographicCitation`
fields
- Don't crash the country/region checker/fixer when a title field is missing
### Changed
- Don't run newline fix on description fields
- Install requests-cache in main run() function instead of check.agrovoc() function so we only incur the overhead once
- Use py3langid instead of langid, see: [How to make language detection with langid.py faster](https://adrien.barbaresi.eu/blog/language-detection-langid-py-faster.html)
### Updated
- Python dependencies, including Pandas 2.0.0 and [Arrow-backed dtypes](https://datapythonista.me/blog/pandas-20-and-the-arrow-revolution-part-i)
- SPDX license list
## [0.6.1] - 2023-02-23
### Fixed
- Missing region check should ignore subregion field, if it exists
### Changed
- Use SPDX license data from SPDX themselves instead of spdx-license-list
because it is deprecated and outdated
- Require Python 3.9+
- Don't run `fix.separators()` on title or abstract fields
- Don't run whitespace or newline fixes on abstract fields
- Ignore some common non-SPDX licenses
- Ignore `__description` suffix in filenames meant for SAFBuilder when checking
for uncommon file extensions
### Updated
- Python dependencies
## [0.6.0] - 2022-09-02
## [0.6.0] = 2022-09-02
### Changed
- Perform fix for "unnecessary" Unicode characters after we try to fix encoding
issues with ftfy

MANIFEST.in

@@ -1 +0,0 @@
include csv_metadata_quality/data/licenses.json

README.md

@@ -8,7 +8,7 @@
A simple, but opinionated metadata quality checker and fixer designed to work with CSVs in the DSpace ecosystem (though it could theoretically work on any CSV that uses Dublin Core fields as columns). The implementation is essentially a pipeline of checks and fixes that begins with splitting multi-value fields on the standard DSpace "||" separator, trimming leading/trailing whitespace, and then proceeding to more specialized cases like ISSNs, ISBNs, languages, unnecessary Unicode, AGROVOC terms, etc.
Requires Python 3.9 or greater. CSV support comes from the [Pandas](https://pandas.pydata.org/) library.
Requires Python 3.8 or greater. CSV support comes from the [Pandas](https://pandas.pydata.org/) library.
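The pipeline described above, as a minimal illustrative sketch (not the project's actual API; the function name here is hypothetical, the real fixes live in csv_metadata_quality/fix.py):

import pandas as pd

# Hypothetical sketch of one pipeline step: split a DSpace multi-value
# field on the "||" separator, trim each value, and rejoin.
def trim_multivalue(field):
    if pd.isna(field):
        return field
    return "||".join(value.strip() for value in field.split("||"))

df = pd.DataFrame({"dcterms.subject": ["LIVESTOCK ||  FOREST"]})
df["dcterms.subject"] = df["dcterms.subject"].apply(trim_multivalue)
print(df["dcterms.subject"][0])  # LIVESTOCK||FOREST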
If you use the DSpace CSV metadata quality checker please cite:
@@ -31,7 +31,6 @@ If you use the DSpace CSV metadata quality checker please cite:
- Check for countries with missing regions (and attempt to fix with `--unsafe-fixes`)
- Remove duplicate metadata values
- Check for duplicate items, using the title, type, and date issued as an indicator
- [Normalize DOIs](https://www.crossref.org/documentation/member-setup/constructing-your-dois/) to https://doi.org URI format
## Installation
The easiest way to install CSV Metadata Quality is with [poetry](https://python-poetry.org):
@@ -126,7 +125,9 @@ This currently uses the [Python langid](https://github.com/saffsd/langid.py) lib
- Better logging, for example with INFO, WARN, and ERR levels
- Verbose, debug, or quiet options
- Warn if an author is shorter than 3 characters?
- Validate DOIs? Normalize to https://doi.org format? Or use just the DOI part: 10.1016/j.worlddev.2010.06.006
- Warn if two items use the same file in `filename` column
- Add an option to drop invalid AGROVOC subjects?
- Add tests for application invocation, ie `tests/test_app.py`?
- Validate ISSNs or journal titles against CrossRef API?
- Add configurable field validation, like specify a field name and a validation file?
@@ -136,7 +137,7 @@ This currently uses the [Python langid](https://github.com/saffsd/langid.py) lib
- Warn if item is Open Access, but missing a license
- Warn if item has an ISSN but no journal title
- Update journal titles from ISSN
- Migrate from Pandas to Polars
- Migrate to https://github.com/spdx/license-list-data
## License
This work is licensed under the [GPLv3](https://www.gnu.org/licenses/gpl-3.0.en.html).

csv_metadata_quality/app.py

@@ -1,14 +1,11 @@
# SPDX-License-Identifier: GPL-3.0-only
import argparse
import os
import re
import signal
import sys
from datetime import timedelta
import pandas as pd
import requests_cache
from colorama import Fore
import csv_metadata_quality.check as check
@@ -77,7 +74,7 @@ def run(argv):
signal.signal(signal.SIGINT, signal_handler)
# Read all fields as strings so dates don't get converted from 1998 to 1998.0
df = pd.read_csv(args.input_file, dtype_backend="pyarrow", dtype="str")
df = pd.read_csv(args.input_file, dtype=str)
# Check if the user requested to skip any fields
if args.exclude_fields:
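For context on the dtype change above, a minimal sketch using standard pandas behaviour (column names borrowed from data/test.csv):

import io
import pandas as pd

csv = "dc.title,dcterms.issued\nNormal item,1998\nMissing date,\n"
# With type inference, a missing value forces float64, so 1998 becomes 1998.0
print(pd.read_csv(io.StringIO(csv))["dcterms.issued"][0])             # 1998.0
# Reading every field as a string preserves the original value
print(pd.read_csv(io.StringIO(csv), dtype=str)["dcterms.issued"][0])  # '1998'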
@@ -85,20 +82,7 @@ def run(argv):
# user should be careful to not include spaces here.
exclude = args.exclude_fields.split(",")
else:
exclude = []
# enable transparent request cache with thirty days expiry
expire_after = timedelta(days=30)
# Allow overriding the location of the requests cache, just in case we are
# running in an environment where we can't write to the current working di-
# rectory (for example from csv-metadata-quality-web).
REQUESTS_CACHE_DIR = os.environ.get("REQUESTS_CACHE_DIR", ".")
requests_cache.install_cache(
f"{REQUESTS_CACHE_DIR}/agrovoc-response-cache", expire_after=expire_after
)
# prune old cache entries
requests_cache.delete()
exclude = list()
for column in df.columns:
if column in exclude:
@@ -106,21 +90,17 @@ def run(argv):
continue
if args.unsafe_fixes:
# Skip whitespace and newline fixes on abstracts and descriptions
# because there are too many with legitimate multi-line metadata.
match = re.match(r"^.*?(abstract|description).*$", column)
if match is None:
# Fix: whitespace
df[column] = df[column].apply(fix.whitespace, field_name=column)
# Fix: whitespace
df[column] = df[column].apply(fix.whitespace, field_name=column)
# Fix: newlines
df[column] = df[column].apply(fix.newlines, field_name=column)
# Fix: newlines
if args.unsafe_fixes:
df[column] = df[column].apply(fix.newlines, field_name=column)
# Fix: missing space after comma. Only run on author and citation
# fields for now, as this problem is mostly an issue in names.
if args.unsafe_fixes:
match = re.match(r"^.*?(author|[Cc]itation).*$", column)
match = re.match(r"^.*?(author|citation).*$", column)
if match is not None:
df[column] = df[column].apply(fix.comma_space, field_name=column)
@@ -141,19 +121,10 @@ def run(argv):
# Fix: unnecessary Unicode
df[column] = df[column].apply(fix.unnecessary_unicode)
# Fix: normalize DOIs
match = re.match(r"^.*?identifier\.doi.*$", column)
if match is not None:
df[column] = df[column].apply(fix.normalize_dois)
# Fix: invalid and unnecessary multi-value separators. Skip the title
# and abstract fields because "|" is used to indicate something like
# a subtitle.
match = re.match(r"^.*?(abstract|[Cc]itation|title).*$", column)
if match is None:
df[column] = df[column].apply(fix.separators, field_name=column)
# Run whitespace fix again after fixing invalid separators
df[column] = df[column].apply(fix.whitespace, field_name=column)
# Fix: invalid and unnecessary multi-value separators
df[column] = df[column].apply(fix.separators, field_name=column)
# Run whitespace fix again after fixing invalid separators
df[column] = df[column].apply(fix.whitespace, field_name=column)
# Fix: duplicate metadata values
df[column] = df[column].apply(fix.duplicates, field_name=column)

csv_metadata_quality/check.py

@@ -1,18 +1,21 @@
# SPDX-License-Identifier: GPL-3.0-only
import logging
import os
import re
from datetime import datetime
from datetime import datetime, timedelta
import country_converter as coco
import pandas as pd
import requests
import requests_cache
import spdx_license_list
from colorama import Fore
from pycountry import languages
from stdnum import isbn as stdnum_isbn
from stdnum import issn as stdnum_issn
from csv_metadata_quality.util import is_mojibake, load_spdx_licenses
from csv_metadata_quality.util import is_mojibake
def issn(field):
@@ -31,6 +34,7 @@ def issn(field):
# Try to split multi-value field on "||" separator
for value in field.split("||"):
if not stdnum_issn.is_valid(value):
print(f"{Fore.RED}Invalid ISSN: {Fore.RESET}{value}")
@@ -53,6 +57,7 @@ def isbn(field):
# Try to split multi-value field on "||" separator
for value in field.split("||"):
if not stdnum_isbn.is_valid(value):
print(f"{Fore.RED}Invalid ISBN: {Fore.RESET}{value}")
@@ -133,7 +138,7 @@ def suspicious_characters(field, field_name):
return
# List of suspicious characters, for example: ́ˆ~`
suspicious_characters = ["\u00b4", "\u02c6", "\u007e", "\u0060"]
suspicious_characters = ["\u00B4", "\u02C6", "\u007E", "\u0060"]
for character in suspicious_characters:
# Find the position of the suspicious character in the string
@@ -169,6 +174,7 @@ def language(field):
# Try to split multi-value field on "||" separator
for value in field.split("||"):
# After splitting, check if language value is 2 or 3 characters so we
# can check it against ISO 639-1 or ISO 639-3 accordingly.
if len(value) == 2:
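A minimal sketch of the pycountry lookups behind this check (standard pycountry API; ISO 639-1 for two-letter codes, ISO 639-3 for three-letter codes):

from pycountry import languages

print(languages.get(alpha_2="en"))   # ISO 639-1 lookup: English
print(languages.get(alpha_3="eng"))  # ISO 639-3 lookup: English
print(languages.get(alpha_2="jp"))   # None; Japanese is "ja", so "jp" is invalid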
@@ -201,12 +207,25 @@ def agrovoc(field, field_name, drop):
if pd.isna(field):
return
# enable transparent request cache with thirty days expiry
expire_after = timedelta(days=30)
# Allow overriding the location of the requests cache, just in case we are
# running in an environment where we can't write to the current working di-
# rectory (for example from csv-metadata-quality-web).
REQUESTS_CACHE_DIR = os.environ.get("REQUESTS_CACHE_DIR", ".")
requests_cache.install_cache(
f"{REQUESTS_CACHE_DIR}/agrovoc-response-cache", expire_after=expire_after
)
# prune old cache entries
# requests_cache.remove_expired_responses()
# Initialize an empty list to hold the validated AGROVOC values
values = []
values = list()
# Try to split multi-value field on "||" separator
for value in field.split("||"):
request_url = "https://agrovoc.uniroma2.it/agrovoc/rest/v1/agrovoc/search"
request_url = "http://agrovoc.uniroma2.it/agrovoc/rest/v1/agrovoc/search"
request_params = {"query": value}
request = requests.get(request_url, params=request_params)
@@ -268,11 +287,6 @@ def filename_extension(field):
# Iterate over all values
for value in values:
# Strip filename descriptions that are meant for SAF Bundler, for
# example: Annual_Report_2020.pdf__description:Report
if "__description" in value:
value = value.split("__")[0]
# Assume filename extension does not match
filename_extension_match = False
@@ -299,26 +313,13 @@ def spdx_license_identifier(field):
Prints the value if it is invalid.
"""
# List of common non-SPDX licenses to ignore
# See: https://ilri.github.io/cgspace-submission-guidelines/dcterms-license/dcterms-license.txt
ignore_licenses = {
"All rights reserved; no re-use allowed",
"All rights reserved; self-archive copy only",
"Copyrighted; Non-commercial educational use only",
"Copyrighted; Non-commercial use only",
"Copyrighted; all rights reserved",
"Other",
}
# Skip fields with missing values
if pd.isna(field) or field in ignore_licenses:
if pd.isna(field):
return
spdx_licenses = load_spdx_licenses()
# Try to split multi-value field on "||" separator
for value in field.split("||"):
if value not in spdx_licenses:
if value not in spdx_license_list.LICENSES:
print(f"{Fore.YELLOW}Non-SPDX license identifier: {Fore.RESET}{value}")
return
@@ -358,7 +359,7 @@ def duplicate_items(df):
if items_count_unique < items_count_total:
# Create a list to hold our items while we check for duplicates
items = []
items = list()
for index, row in df.iterrows():
item_title_type_date = f"{row[title_column_name]}{row[type_column_name]}{row[date_column_name]}"
@@ -511,9 +512,9 @@ def countries_match_regions(row, exclude):
if match is not None:
country_column_name = label
# Find the name of the region column, but make sure it's not subregion!
# Find the name of the region column
match = re.match(r"^.*?region.*$", label)
if match is not None and "sub" not in label:
if match is not None:
region_column_name = label
# Find the name of the title column
@@ -539,7 +540,7 @@ def countries_match_regions(row, exclude):
if row[region_column_name] is not None:
regions = row[region_column_name].split("||")
else:
regions = []
regions = list()
for country in countries:
# Look up the UN M.49 regions for this country code. CoCo seems to
@@ -548,13 +549,8 @@ def countries_match_regions(row, exclude):
un_region = cc.convert(names=country, to="UNRegion")
if un_region != "not found" and un_region not in regions:
try:
print(
f"{Fore.YELLOW}Missing region ({country} → {un_region}): {Fore.RESET}{row[title_column_name]}"
)
except KeyError:
print(
f"{Fore.YELLOW}Missing region ({country} → {un_region}): {Fore.RESET}<title field not present>"
)
print(
f"{Fore.YELLOW}Missing region ({un_region}): {Fore.RESET}{row[title_column_name]}"
)
return
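A minimal sketch of the country_converter lookup used above (the to="UNRegion" call is as shown in this diff; Kenya mapping to Eastern Africa matches the test data later in this commit):

import country_converter as coco

cc = coco.CountryConverter()
print(cc.convert(names="Kenya", to="UNRegion"))   # Eastern Africa
print(cc.convert(names="KENYAA", to="UNRegion"))  # not found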

csv_metadata_quality/data/licenses.json: file diff suppressed because it is too large

csv_metadata_quality/experimental.py

@@ -2,8 +2,8 @@
import re
import langid
import pandas as pd
import py3langid as langid
from colorama import Fore
from pycountry import languages
@@ -20,7 +20,7 @@ def correct_language(row, exclude):
# Initialize some variables at global scope so that we can set them in the
# loop scope below and still be able to access them afterwards.
language = ""
sample_strings = []
sample_strings = list()
title = None
# Iterate over the labels of the current row's values. Before we transposed
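The langid to py3langid swap above relies on py3langid being a drop-in replacement; a minimal sketch, assuming the same classify() API:

import py3langid as langid

# classify() returns a (language, score) tuple, same as the original langid
lang, score = langid.classify("A randomised vaccine field trial in Kenya")
print(lang)  # en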

csv_metadata_quality/fix.py

@@ -23,7 +23,7 @@ def whitespace(field, field_name):
return
# Initialize an empty list to hold the cleaned values
values = []
values = list()
# Try to split multi-value field on "||" separator
for value in field.split("||"):
@@ -64,7 +64,7 @@ def separators(field, field_name):
return
# Initialize an empty list to hold the cleaned values
values = []
values = list()
# Try to split multi-value field on "||" separator
for value in field.split("||"):
@@ -175,7 +175,7 @@ def duplicates(field, field_name):
values = field.split("||")
# Initialize an empty list to hold the de-duplicated values
new_values = []
new_values = list()
# Iterate over all values
for value in values:
@@ -327,9 +327,9 @@ def countries_match_regions(row, exclude):
if match is not None:
country_column_name = label
# Find the name of the region column, but make sure it's not subregion!
# Find the name of the region column
match = re.match(r"^.*?region.*$", label)
if match is not None and "sub" not in label:
if match is not None:
region_column_name = label
# Find the name of the title column
@@ -355,10 +355,10 @@ def countries_match_regions(row, exclude):
if row[region_column_name] is not None:
regions = row[region_column_name].split("||")
else:
regions = []
regions = list()
# An empty list for our regions so we can keep track for all countries
missing_regions = []
missing_regions = list()
for country in countries:
# Look up the UN M.49 regions for this country code. CoCo seems to
@@ -370,17 +370,9 @@ def countries_match_regions(row, exclude):
# it doesn't already exist in regions.
if un_region != "not found" and un_region not in regions:
if un_region not in missing_regions:
try:
print(
f"{Fore.YELLOW}Adding missing region ({un_region}): {Fore.RESET}{row[title_column_name]}"
)
except KeyError:
# If there is no title column in the CSV we will print
# the fix without the title instead of crashing.
print(
f"{Fore.YELLOW}Adding missing region ({un_region}): {Fore.RESET}<title field not present>"
)
print(
f"{Fore.YELLOW}Adding missing region ({un_region}): {Fore.RESET}{row[title_column_name]}"
)
missing_regions.append(un_region)
if len(missing_regions) > 0:
@@ -395,88 +387,3 @@ def countries_match_regions(row, exclude):
row[region_column_name] = "||".join(missing_regions)
return row
def normalize_dois(field):
"""Normalize DOIs.
DOIs are meant to be globally unique identifiers. They are case insensitive,
but in order to compare them robustly they should be normalized to a common
format:
- strip leading and trailing whitespace
- lowercase all ASCII characters
- convert all variations to https://doi.org/10.xxxx/xxxx URI format
Return string with normalized DOI.
See: https://www.crossref.org/documentation/member-setup/constructing-your-dois/
"""
# Skip fields with missing values
if pd.isna(field):
return
# Try to split multi-value field on "||" separator
values = field.split("||")
# Initialize an empty list to hold the de-duplicated values
new_values = []
# Iterate over all values (most items will only have one DOI)
for value in values:
# Strip leading and trailing whitespace
new_value = value.strip()
new_value = new_value.lower()
# Convert to HTTPS
pattern = re.compile(r"^http://")
match = re.findall(pattern, new_value)
if match:
new_value = re.sub(pattern, "https://", new_value)
# Convert dx.doi.org to doi.org
pattern = re.compile(r"dx\.doi\.org")
match = re.findall(pattern, new_value)
if match:
new_value = re.sub(pattern, "doi.org", new_value)
# Convert www.doi.org to doi.org
pattern = re.compile(r"www\.doi\.org")
match = re.findall(pattern, new_value)
if match:
new_value = re.sub(pattern, "doi.org", new_value)
# Convert erroneous %2f to /
pattern = re.compile("%2f")
match = re.findall(pattern, new_value)
if match:
new_value = re.sub(pattern, "/", new_value)
# Replace values like doi: 10.11648/j.jps.20140201.14
pattern = re.compile(r"^doi: 10\.")
match = re.findall(pattern, new_value)
if match:
new_value = re.sub(pattern, "https://doi.org/10.", new_value)
# Replace values like 10.3390/foods12010115
pattern = re.compile(r"^10\.")
match = re.findall(pattern, new_value)
if match:
new_value = re.sub(pattern, "https://doi.org/10.", new_value)
if new_value != value:
print(f"{Fore.GREEN}Normalized DOI: {Fore.RESET}{value}")
new_values.append(new_value)
new_field = "||".join(new_values)
return new_field
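Example input and output for the new fixer, matching the test added in tests/test_fix.py at the end of this diff:

import csv_metadata_quality.fix as fix

print(fix.normalize_dois("doi: 10.11648/j.jps.20140201.14"))
# https://doi.org/10.11648/j.jps.20140201.14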

csv_metadata_quality/util.py

@@ -1,9 +1,5 @@
# SPDX-License-Identifier: GPL-3.0-only
import json
import os
from ftfy.badness import is_bad
@@ -53,13 +49,3 @@ def is_mojibake(field):
else:
# Encodable as CP-1252, Mojibake alert level high
return True
def load_spdx_licenses():
"""Returns a Python list of SPDX short license identifiers."""
with open(os.path.join(os.path.dirname(__file__), "data/licenses.json")) as f:
licenses = json.load(f)
# List comprehension to extract the license ID for each license
return [license["licenseId"] for license in licenses["licenses"]]
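load_spdx_licenses() assumes the SPDX license-list-data JSON layout; a minimal sketch of the shape it reads (structure inferred from the list comprehension above, license IDs abridged):

# Assumed shape of csv_metadata_quality/data/licenses.json:
licenses = {"licenses": [{"licenseId": "GPL-3.0-only"}, {"licenseId": "CC-BY-4.0"}]}
spdx_licenses = [license["licenseId"] for license in licenses["licenses"]]
print("CC-BY-4.0" in spdx_licenses)  # True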

csv_metadata_quality/version.py

@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-3.0-only
VERSION = "0.6.1"
VERSION = "0.6.0"

data/abstract-check.csv

@@ -1,17 +0,0 @@
id,dc.title,dcterms.abstract
1,Normal item,This is an abstract
2,Leading whitespace, This is an abstract
3,Trailing whitespace,This is an abstract
4,Consecutive whitespace,This is an abstract
5,Newline,"This
is an abstract"
6,Newline with leading whitespace," This
is an abstract"
7,Newline with trailing whitespace,"This
is an abstract "
8,Newline with consecutive whitespace,"This
is an abstract"
9,Multiple newlines,"This
is
an
abstract"

data/test.csv

@@ -1,43 +1,38 @@
dc.title,dcterms.issued,dc.identifier.issn,dc.identifier.isbn,dcterms.language,dcterms.subject,cg.coverage.country,filename,dcterms.license,dcterms.type,dcterms.bibliographicCitation,cg.identifier.doi,cg.coverage.region,cg.coverage.subregion
Leading space,2019-07-29,,,,,,,,,,,,
Trailing space ,2019-07-29,,,,,,,,,,,,
Excessive space,2019-07-29,,,,,,,,,,,,
Miscellaenous ||whitespace | issues ,2019-07-29,,,,,,,,,,,,
Duplicate||Duplicate,2019-07-29,,,,,,,,,,,,
Invalid ISSN,2019-07-29,2321-2302,,,,,,,,,,,
Invalid ISBN,2019-07-29,,978-0-306-40615-6,,,,,,,,,,
Multiple valid ISSNs,2019-07-29,0378-5955||0024-9319,,,,,,,,,,,
Multiple valid ISBNs,2019-07-29,,99921-58-10-7||978-0-306-40615-7,,,,,,,,,,
Invalid date,2019-07-260,,,,,,,,,,,,
Multiple dates,2019-07-26||2019-01-10,,,,,,,,,,,,
Invalid multi-value separator,2019-07-29,0378-5955|0024-9319,,,,,,,,,,,
Unnecessary Unicode,2019-07-29,,,,,,,,,,,,
Suspicious character||foreˆt,2019-07-29,,,,,,,,,,,,
Invalid ISO 639-1 (alpha 2) language,2019-07-29,,,jp,,,,,,,,,
Invalid ISO 639-3 (alpha 3) language,2019-07-29,,,chi,,,,,,,,,
Invalid language,2019-07-29,,,Span,,,,,,,,,
Invalid AGROVOC subject,2019-07-29,,,,LIVESTOCK||FOREST,,,,,,,,
dc.title,dcterms.issued,dc.identifier.issn,dc.identifier.isbn,dcterms.language,dcterms.subject,cg.coverage.country,filename,dcterms.license,dcterms.type,dcterms.bibliographicCitation,cg.identifier.doi,cg.coverage.region
Leading space,2019-07-29,,,,,,,,,,,
Trailing space ,2019-07-29,,,,,,,,,,,
Excessive space,2019-07-29,,,,,,,,,,,
Miscellaenous ||whitespace | issues ,2019-07-29,,,,,,,,,,,
Duplicate||Duplicate,2019-07-29,,,,,,,,,,,
Invalid ISSN,2019-07-29,2321-2302,,,,,,,,,,
Invalid ISBN,2019-07-29,,978-0-306-40615-6,,,,,,,,,
Multiple valid ISSNs,2019-07-29,0378-5955||0024-9319,,,,,,,,,,
Multiple valid ISBNs,2019-07-29,,99921-58-10-7||978-0-306-40615-7,,,,,,,,,
Invalid date,2019-07-260,,,,,,,,,,,
Multiple dates,2019-07-26||2019-01-10,,,,,,,,,,,
Invalid multi-value separator,2019-07-29,0378-5955|0024-9319,,,,,,,,,,
Unnecessary Unicode,2019-07-29,,,,,,,,,,,
Suspicious character||foreˆt,2019-07-29,,,,,,,,,,,
Invalid ISO 639-1 (alpha 2) language,2019-07-29,,,jp,,,,,,,,
Invalid ISO 639-3 (alpha 3) language,2019-07-29,,,chi,,,,,,,,
Invalid language,2019-07-29,,,Span,,,,,,,,
Invalid AGROVOC subject,2019-07-29,,,,LIVESTOCK||FOREST,,,,,,,
Newline (LF),2019-07-30,,,,"TANZA
NIA",,,,,,,,
Missing date,,,,,,,,,,,,,
Invalid country,2019-08-01,,,,,KENYAA,,,,,,,
Uncommon filename extension,2019-08-10,,,,,,file.pdf.lck,,,,,,
Unneccesary unicode (U+002D + U+00AD),2019-08-10,,978-­92-­9043-­823-­6,,,,,,,,,,
"Missing space,after comma",2019-08-27,,,,,,,,,,,,
Incorrect ISO 639-1 language,2019-09-26,,,es,,,,,,,,,
Incorrect ISO 639-3 language,2019-09-26,,,spa,,,,,,,,,
Composéd Unicode,2020-01-14,,,,,,,,,,,,
Decomposéd Unicode,2020-01-14,,,,,,,,,,,,
Unnecessary multi-value separator,2021-01-03,0378-5955||,,,,,,,,,,,
Invalid SPDX license identifier,2021-03-11,,,,,,,CC-BY,,,,,
Duplicate Title,2021-03-17,,,,,,,,Report,,,,
Duplicate Title,2021-03-17,,,,,,,,Report,,,,
Mojibake,2021-03-18,,,,Publicaçao CIAT,,,,Report,,,,
"DOI in citation, but missing cg.identifier.doi",2021-10-06,,,,,,,,,"Orth, A. 2021. DOI in citation, but missing cg.identifier.doi. doi: 10.1186/1743-422X-9-218",,,
Title missing from citation,2021-12-05,,,,,,,,,"Orth, A. 2021. Title missing f rom citation.",,,
Country missing region,2021-12-08,,,,,Kenya,,,,,,,
Subregion field shouldnt trigger region checks,2022-12-07,,,,,Kenya,,,,,,Eastern Africa,Baringo
DOI with HTTP and dx.doi.org,2024-04-23,,,,,,,,,,http://dx.doi.org/10.1016/j.envc.2023.100794,,
DOI with colon,2024-04-23,,,,,,,,,,doi: 10.11648/j.jps.20140201.14,,
Upper case bare DOI,2024-04-23,,,,,,,,,,10.19103/AS.2018.0043.16,,
DOI with %2f,2024-06-25,,,,,,,,,,https://doi.org/10.1016%2fj.envc.2023.100794,,
NIA",,,,,,,
Missing date,,,,,,,,,,,,
Invalid country,2019-08-01,,,,,KENYAA,,,,,,
Uncommon filename extension,2019-08-10,,,,,,file.pdf.lck,,,,,
Unneccesary unicode (U+002D + U+00AD),2019-08-10,,978-­92-­9043-­823-­6,,,,,,,,,
"Missing space,after comma",2019-08-27,,,,,,,,,,,
Incorrect ISO 639-1 language,2019-09-26,,,es,,,,,,,,
Incorrect ISO 639-3 language,2019-09-26,,,spa,,,,,,,,
Composéd Unicode,2020-01-14,,,,,,,,,,,
Decomposéd Unicode,2020-01-14,,,,,,,,,,,
Unnecessary multi-value separator,2021-01-03,0378-5955||,,,,,,,,,,
Invalid SPDX license identifier,2021-03-11,,,,,,,CC-BY,,,,
Duplicate Title,2021-03-17,,,,,,,,Report,,,
Duplicate Title,2021-03-17,,,,,,,,Report,,,
Mojibake,2021-03-18,,,,Publicaçao CIAT,,,,Report,,,
"DOI in citation, but missing cg.identifier.doi",2021-10-06,,,,,,,,,"Orth, A. 2021. DOI in citation, but missing cg.identifier.doi. doi: 10.1186/1743-422X-9-218",,
Title missing from citation,2021-12-05,,,,,,,,,"Orth, A. 2021. Title missing f rom citation.",,
Country missing region,2021-12-08,,,,,Kenya,,,,,,


poetry.lock (generated, 2133 lines): file diff suppressed because it is too large

pyproject.toml

@@ -1,64 +1,36 @@
[project]
[tool.poetry]
name = "csv-metadata-quality"
version = "0.6.1"
version = "0.6.0"
description="A simple, but opinionated CSV quality checking and fixing pipeline for CSVs in the DSpace ecosystem."
authors = [
{ name = "Alan Orth", email = "alan.orth@gmail.com" }
]
license= { file = "LICENSE.txt" }
dependencies = [
"pandas[feather,performance]~=2.2",
"python-stdnum~=1.20",
"requests~=2.32",
"requests-cache~=1.2.1",
"colorama~=0.4",
"ftfy~=6.2.0",
"country-converter~=1.2",
"pycountry~=24.6.1",
"py3langid~=0.3",
]
readme = "README.md"
requires-python = ">= 3.9"
classifiers = [
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: Implementation :: CPython",
]
[project.urls]
authors = ["Alan Orth <alan.orth@gmail.com>"]
license="GPL-3.0-only"
repository = "https://github.com/ilri/csv-metadata-quality"
homepage = "https://github.com/ilri/csv-metadata-quality"
[project.scripts]
[tool.poetry.scripts]
csv-metadata-quality = 'csv_metadata_quality.__main__:main'
# So rye doesn't fall back to setuptools
# See: https://packaging.python.org/en/latest/tutorials/packaging-projects/#choosing-build-backend
[tool.poetry.dependencies]
python = "^3.8"
pandas = "^1.4.0"
python-stdnum = "^1.13"
requests = "^2.28.1"
requests-cache = "^0.9.6"
pycountry = "^22.3.5"
langid = "^1.1.6"
colorama = "^0.4.5"
spdx-license-list = "^0.5.2"
ftfy = "^6.1.1"
country-converter = "^0.7.7"
[tool.poetry.dev-dependencies]
pytest = "^7.1.3"
flake8 = "^5.0.4"
pytest-clarity = "^1.0.1"
black = "^22.8.0"
isort = "^5.10.1"
csvkit = "^1.0.7"
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[tool.rye]
managed = true
dev-dependencies = [
"pytest~=8.3",
"pytest-clarity~=1.0",
"isort~=5.13",
"csvkit~=2.0",
"ipython~=8.26",
"fixit~=2.1",
]
# So hatch doesn't try to build other top-level directories like "data"
[tool.hatch.build.targets.wheel]
packages = ["csv_metadata_quality"]
[tool.isort]
profile = "black"
line_length=88
requires = ["poetry>=0.12"]
build-backend = "poetry.masonry.api"

renovate.json

@@ -1,9 +0,0 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"config:base"
],
"pip_requirements": {
"enabled": false
}
}

requirements-dev.lock

@@ -1,188 +0,0 @@
# generated by rye
# use `rye lock` or `rye sync` to update this lockfile
#
# last locked with the following flags:
# pre: false
# features: []
# all-features: false
# with-sources: false
# generate-hashes: false
# universal: false
-e file:.
agate==1.10.2
# via agate-dbf
# via agate-excel
# via agate-sql
# via csvkit
agate-dbf==0.2.3
# via csvkit
agate-excel==0.4.1
# via csvkit
agate-sql==0.7.2
# via csvkit
asttokens==2.4.1
# via stack-data
attrs==23.2.0
# via cattrs
# via requests-cache
babel==2.15.0
# via agate
bottleneck==1.3.8
# via pandas
cattrs==23.2.3
# via requests-cache
certifi==2024.2.2
# via requests
charset-normalizer==3.3.2
# via requests
click==8.1.7
# via fixit
# via moreorless
colorama==0.4.6
# via csv-metadata-quality
country-converter==1.2
# via csv-metadata-quality
csvkit==2.0.1
dbfread==2.0.7
# via agate-dbf
decorator==5.1.1
# via ipython
et-xmlfile==1.1.0
# via openpyxl
executing==2.0.1
# via stack-data
fixit==2.1.0
ftfy==6.2.0
# via csv-metadata-quality
greenlet==3.0.3
# via sqlalchemy
idna==3.7
# via requests
iniconfig==2.0.0
# via pytest
ipython==8.26.0
isodate==0.6.1
# via agate
isort==5.13.2
jedi==0.19.1
# via ipython
leather==0.4.0
# via agate
libcst==1.4.0
# via fixit
llvmlite==0.43.0
# via numba
markdown-it-py==3.0.0
# via rich
matplotlib-inline==0.1.7
# via ipython
mdurl==0.1.2
# via markdown-it-py
moreorless==0.4.0
# via fixit
numba==0.60.0
# via pandas
numexpr==2.10.0
# via pandas
numpy==2.0.0
# via bottleneck
# via numba
# via numexpr
# via pandas
# via py3langid
# via pyarrow
olefile==0.47
# via agate-excel
openpyxl==3.1.2
# via agate-excel
# via csvkit
packaging==24.0
# via fixit
# via pytest
pandas==2.2.2
# via country-converter
# via csv-metadata-quality
parsedatetime==2.6
# via agate
parso==0.8.4
# via jedi
pathspec==0.12.1
# via trailrunner
pexpect==4.9.0
# via ipython
platformdirs==4.2.2
# via requests-cache
pluggy==1.5.0
# via pytest
pprintpp==0.4.0
# via pytest-clarity
prompt-toolkit==3.0.43
# via ipython
ptyprocess==0.7.0
# via pexpect
pure-eval==0.2.2
# via stack-data
py3langid==0.3.0
# via csv-metadata-quality
pyarrow==16.1.0
# via pandas
pycountry==24.6.1
# via csv-metadata-quality
pygments==2.18.0
# via ipython
# via rich
pytest==8.3.2
# via pytest-clarity
pytest-clarity==1.0.1
python-dateutil==2.9.0.post0
# via pandas
python-slugify==8.0.4
# via agate
python-stdnum==1.20
# via csv-metadata-quality
pytimeparse==1.1.8
# via agate
pytz==2024.1
# via pandas
pyyaml==6.0.1
# via libcst
requests==2.32.2
# via csv-metadata-quality
# via requests-cache
requests-cache==1.2.1
# via csv-metadata-quality
rich==13.7.1
# via pytest-clarity
six==1.16.0
# via asttokens
# via isodate
# via python-dateutil
# via url-normalize
sqlalchemy==2.0.30
# via agate-sql
# via csvkit
stack-data==0.6.3
# via ipython
text-unidecode==1.3
# via python-slugify
trailrunner==1.4.0
# via fixit
traitlets==5.14.3
# via ipython
# via matplotlib-inline
typing-extensions==4.11.0
# via sqlalchemy
tzdata==2024.1
# via pandas
url-normalize==1.4.3
# via requests-cache
urllib3==2.2.1
# via requests
# via requests-cache
wcwidth==0.2.13
# via ftfy
# via prompt-toolkit
xlrd==2.0.1
# via agate-excel
# via csvkit

requirements-dev.txt (new file, 68 lines)

@@ -0,0 +1,68 @@
agate-dbf==0.2.2 ; python_version >= "3.8" and python_version < "4.0"
agate-excel==0.2.5 ; python_version >= "3.8" and python_version < "4.0"
agate-sql==0.5.8 ; python_version >= "3.8" and python_version < "4.0"
agate==1.6.3 ; python_version >= "3.8" and python_version < "4.0"
appdirs==1.4.4 ; python_version >= "3.8" and python_version < "4.0"
attrs==22.1.0 ; python_version >= "3.8" and python_version < "4.0"
babel==2.10.3 ; python_version >= "3.8" and python_version < "4.0"
black==22.8.0 ; python_version >= "3.8" and python_version < "4.0"
cattrs==22.1.0 ; python_version >= "3.8" and python_version < "4.0"
certifi==2022.6.15 ; python_version >= "3.8" and python_version < "4"
charset-normalizer==2.1.1 ; python_version >= "3.8" and python_version < "4"
click==8.1.3 ; python_version >= "3.8" and python_version < "4.0"
colorama==0.4.5 ; python_version >= "3.8" and python_version < "4.0"
commonmark==0.9.1 ; python_version >= "3.8" and python_version < "4.0"
country-converter==0.7.7 ; python_version >= "3.8" and python_version < "4.0"
csvkit==1.0.7 ; python_version >= "3.8" and python_version < "4.0"
dbfread==2.0.7 ; python_version >= "3.8" and python_version < "4.0"
et-xmlfile==1.1.0 ; python_version >= "3.8" and python_version < "4.0"
exceptiongroup==1.0.0rc9 ; python_version >= "3.8" and python_version <= "3.10"
flake8==5.0.4 ; python_version >= "3.8" and python_version < "4.0"
ftfy==6.1.1 ; python_version >= "3.8" and python_version < "4"
future==0.18.2 ; python_version >= "3.8" and python_version < "4.0"
greenlet==1.1.3 ; python_version >= "3.8" and (platform_machine == "aarch64" or platform_machine == "ppc64le" or platform_machine == "x86_64" or platform_machine == "amd64" or platform_machine == "AMD64" or platform_machine == "win32" or platform_machine == "WIN32") and python_version < "4.0"
idna==3.3 ; python_version >= "3.8" and python_version < "4"
iniconfig==1.1.1 ; python_version >= "3.8" and python_version < "4.0"
isodate==0.6.1 ; python_version >= "3.8" and python_version < "4.0"
isort==5.10.1 ; python_version >= "3.8" and python_version < "4.0"
langid==1.1.6 ; python_version >= "3.8" and python_version < "4.0"
leather==0.3.4 ; python_version >= "3.8" and python_version < "4.0"
mccabe==0.7.0 ; python_version >= "3.8" and python_version < "4.0"
mypy-extensions==0.4.3 ; python_version >= "3.8" and python_version < "4.0"
numpy==1.23.2 ; python_version < "4.0" and python_version >= "3.8"
olefile==0.46 ; python_version >= "3.8" and python_version < "4.0"
openpyxl==3.0.10 ; python_version >= "3.8" and python_version < "4.0"
packaging==21.3 ; python_version >= "3.8" and python_version < "4.0"
pandas==1.4.4 ; python_version >= "3.8" and python_version < "4.0"
parsedatetime==2.4 ; python_version >= "3.8" and python_version < "4.0"
pathspec==0.10.1 ; python_version >= "3.8" and python_version < "4.0"
platformdirs==2.5.2 ; python_version >= "3.8" and python_version < "4.0"
pluggy==1.0.0 ; python_version >= "3.8" and python_version < "4.0"
pprintpp==0.4.0 ; python_version >= "3.8" and python_version < "4.0"
py==1.11.0 ; python_version >= "3.8" and python_version < "4.0"
pycodestyle==2.9.1 ; python_version >= "3.8" and python_version < "4.0"
pycountry==22.3.5 ; python_version >= "3.8" and python_version < "4"
pyflakes==2.5.0 ; python_version >= "3.8" and python_version < "4.0"
pygments==2.13.0 ; python_version >= "3.8" and python_version < "4.0"
pyparsing==3.0.9 ; python_version >= "3.8" and python_version < "4.0"
pytest-clarity==1.0.1 ; python_version >= "3.8" and python_version < "4.0"
pytest==7.1.3 ; python_version >= "3.8" and python_version < "4.0"
python-dateutil==2.8.2 ; python_version >= "3.8" and python_version < "4.0"
python-slugify==6.1.2 ; python_version >= "3.8" and python_version < "4.0"
python-stdnum==1.17 ; python_version >= "3.8" and python_version < "4.0"
pytimeparse==1.1.8 ; python_version >= "3.8" and python_version < "4.0"
pytz==2022.2.1 ; python_version >= "3.8" and python_version < "4.0"
requests-cache==0.9.6 ; python_version >= "3.8" and python_version < "4.0"
requests==2.28.1 ; python_version >= "3.8" and python_version < "4"
rich==12.5.1 ; python_version >= "3.8" and python_version < "4.0"
setuptools==65.3.0 ; python_version >= "3.8" and python_version < "4"
six==1.16.0 ; python_version >= "3.8" and python_version < "4.0"
spdx-license-list==0.5.2 ; python_version >= "3.8" and python_version < "4.0"
sqlalchemy==1.4.40 ; python_version >= "3.8" and python_version < "4.0"
text-unidecode==1.3 ; python_version >= "3.8" and python_version < "4.0"
tomli==2.0.1 ; python_version >= "3.8" and python_version < "4.0"
typing-extensions==4.3.0 ; python_version >= "3.8" and python_version < "3.10"
url-normalize==1.4.3 ; python_version >= "3.8" and python_version < "4.0"
urllib3==1.26.12 ; python_version >= "3.8" and python_version < "4"
wcwidth==0.2.5 ; python_version >= "3.8" and python_version < "4"
xlrd==2.0.1 ; python_version >= "3.8" and python_version < "4.0"

requirements.lock

@@ -1,78 +0,0 @@
# generated by rye
# use `rye lock` or `rye sync` to update this lockfile
#
# last locked with the following flags:
# pre: false
# features: []
# all-features: false
# with-sources: false
# generate-hashes: false
# universal: false
-e file:.
attrs==23.2.0
# via cattrs
# via requests-cache
bottleneck==1.3.8
# via pandas
cattrs==23.2.3
# via requests-cache
certifi==2024.2.2
# via requests
charset-normalizer==3.3.2
# via requests
colorama==0.4.6
# via csv-metadata-quality
country-converter==1.2
# via csv-metadata-quality
ftfy==6.2.0
# via csv-metadata-quality
idna==3.7
# via requests
llvmlite==0.43.0
# via numba
numba==0.60.0
# via pandas
numexpr==2.10.0
# via pandas
numpy==2.0.0
# via bottleneck
# via numba
# via numexpr
# via pandas
# via py3langid
# via pyarrow
pandas==2.2.2
# via country-converter
# via csv-metadata-quality
platformdirs==4.2.2
# via requests-cache
py3langid==0.3.0
# via csv-metadata-quality
pyarrow==16.1.0
# via pandas
pycountry==24.6.1
# via csv-metadata-quality
python-dateutil==2.9.0.post0
# via pandas
python-stdnum==1.20
# via csv-metadata-quality
pytz==2024.1
# via pandas
requests==2.32.2
# via csv-metadata-quality
# via requests-cache
requests-cache==1.2.1
# via csv-metadata-quality
six==1.16.0
# via python-dateutil
# via url-normalize
tzdata==2024.1
# via pandas
url-normalize==1.4.3
# via requests-cache
urllib3==2.2.1
# via requests
# via requests-cache
wcwidth==0.2.13
# via ftfy

requirements.txt (new file, 25 lines)

@@ -0,0 +1,25 @@
appdirs==1.4.4 ; python_version >= "3.8" and python_version < "4.0"
attrs==22.1.0 ; python_version >= "3.8" and python_version < "4.0"
cattrs==22.1.0 ; python_version >= "3.8" and python_version < "4.0"
certifi==2022.6.15 ; python_version >= "3.8" and python_version < "4"
charset-normalizer==2.1.1 ; python_version >= "3.8" and python_version < "4"
colorama==0.4.5 ; python_version >= "3.8" and python_version < "4.0"
country-converter==0.7.7 ; python_version >= "3.8" and python_version < "4.0"
exceptiongroup==1.0.0rc9 ; python_version >= "3.8" and python_version <= "3.10"
ftfy==6.1.1 ; python_version >= "3.8" and python_version < "4"
idna==3.3 ; python_version >= "3.8" and python_version < "4"
langid==1.1.6 ; python_version >= "3.8" and python_version < "4.0"
numpy==1.23.2 ; python_version < "4.0" and python_version >= "3.8"
pandas==1.4.4 ; python_version >= "3.8" and python_version < "4.0"
pycountry==22.3.5 ; python_version >= "3.8" and python_version < "4"
python-dateutil==2.8.2 ; python_version >= "3.8" and python_version < "4.0"
python-stdnum==1.17 ; python_version >= "3.8" and python_version < "4.0"
pytz==2022.2.1 ; python_version >= "3.8" and python_version < "4.0"
requests-cache==0.9.6 ; python_version >= "3.8" and python_version < "4.0"
requests==2.28.1 ; python_version >= "3.8" and python_version < "4"
setuptools==65.3.0 ; python_version >= "3.8" and python_version < "4"
six==1.16.0 ; python_version >= "3.8" and python_version < "4.0"
spdx-license-list==0.5.2 ; python_version >= "3.8" and python_version < "4.0"
url-normalize==1.4.3 ; python_version >= "3.8" and python_version < "4.0"
urllib3==1.26.12 ; python_version >= "3.8" and python_version < "4"
wcwidth==0.2.5 ; python_version >= "3.8" and python_version < "4"

setup.cfg (new file, 6 lines)

@@ -0,0 +1,6 @@
[isort]
multi_line_output=3
include_trailing_comma=True
force_grid_wrap=0
use_parentheses=True
line_length=88

setup.py (new file, 37 lines)

@@ -0,0 +1,37 @@
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
install_requires = [
"pandas",
"python-stdnum",
"requests",
"requests-cache",
"pycountry",
"langid",
]
setuptools.setup(
name="csv-metadata-quality",
version="0.6.0",
author="Alan Orth",
author_email="aorth@mjanja.ch",
description="A simple, but opinionated CSV quality checking and fixing pipeline for CSVs in the DSpace ecosystem.",
license="GPLv3",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/alanorth/csv-metadata-quality",
classifiers=[
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Operating System :: OS Independent",
],
packages=["csv_metadata_quality"],
entry_points={
"console_scripts": ["csv-metadata-quality = csv_metadata_quality.__main__:main"]
},
install_requires=install_requires,
)

tests/test_check.py

@@ -25,7 +25,7 @@ def test_check_valid_issn():
result = check.issn(value)
assert result is None
assert result == None
def test_check_invalid_isbn(capsys):
@@ -46,7 +46,7 @@ def test_check_valid_isbn():
result = check.isbn(value)
assert result is None
assert result == None
def test_check_missing_date(capsys):
@@ -102,7 +102,7 @@ def test_check_valid_date():
result = check.date(value, field_name)
assert result is None
assert result == None
def test_check_suspicious_characters(capsys):
@@ -128,7 +128,7 @@ def test_check_valid_iso639_1_language():
result = check.language(value)
assert result is None
assert result == None
def test_check_valid_iso639_3_language():
@@ -138,7 +138,7 @@ def test_check_valid_iso639_3_language():
result = check.language(value)
assert result is None
assert result == None
def test_check_invalid_iso639_1_language(capsys):
@@ -249,7 +249,7 @@ def test_check_common_filename_extension():
result = check.filename_extension(value)
assert result is None
assert result == None
def test_check_incorrect_iso_639_1_language(capsys):
@@ -257,7 +257,7 @@ def test_check_incorrect_iso_639_1_language(capsys):
title = "A randomised vaccine field trial in Kenya demonstrates protection against wildebeest-associated malignant catarrhal fever in cattle"
language = "es"
exclude = []
exclude = list()
# Create a dictionary to mimic Pandas series
row = {"dc.title": title, "dc.language.iso": language}
@@ -277,7 +277,7 @@ def test_check_incorrect_iso_639_3_language(capsys):
title = "A randomised vaccine field trial in Kenya demonstrates protection against wildebeest-associated malignant catarrhal fever in cattle"
language = "spa"
exclude = []
exclude = list()
# Create a dictionary to mimic Pandas series
row = {"dc.title": title, "dc.language.iso": language}
@@ -297,7 +297,7 @@ def test_check_correct_iso_639_1_language():
title = "A randomised vaccine field trial in Kenya demonstrates protection against wildebeest-associated malignant catarrhal fever in cattle"
language = "en"
exclude = []
exclude = list()
# Create a dictionary to mimic Pandas series
row = {"dc.title": title, "dc.language.iso": language}
@@ -305,7 +305,7 @@ def test_check_correct_iso_639_1_language():
result = experimental.correct_language(series, exclude)
assert result is None
assert result == None
def test_check_correct_iso_639_3_language():
@@ -313,7 +313,7 @@ def test_check_correct_iso_639_3_language():
title = "A randomised vaccine field trial in Kenya demonstrates protection against wildebeest-associated malignant catarrhal fever in cattle"
language = "eng"
exclude = []
exclude = list()
# Create a dictionary to mimic Pandas series
row = {"dc.title": title, "dc.language.iso": language}
@@ -321,7 +321,7 @@ def test_check_correct_iso_639_3_language():
result = experimental.correct_language(series, exclude)
assert result is None
assert result == None
def test_check_valid_spdx_license_identifier():
@@ -331,7 +331,7 @@ def test_check_valid_spdx_license_identifier():
result = check.spdx_license_identifier(license)
assert result is None
assert result == None
def test_check_invalid_spdx_license_identifier(capsys):
@@ -339,7 +339,7 @@ def test_check_invalid_spdx_license_identifier(capsys):
license = "CC-BY-SA"
check.spdx_license_identifier(license)
result = check.spdx_license_identifier(license)
captured = capsys.readouterr()
assert (
@@ -362,7 +362,7 @@ def test_check_duplicate_item(capsys):
}
df = pd.DataFrame(data=d)
check.duplicate_items(df)
result = check.duplicate_items(df)
captured = capsys.readouterr()
assert (
@@ -379,7 +379,7 @@ def test_check_no_mojibake():
result = check.mojibake(field, field_name)
assert result is None
assert result == None
def test_check_mojibake(capsys):
@@ -388,7 +388,7 @@ def test_check_mojibake(capsys):
field = "CIAT Publicaçao"
field_name = "dcterms.isPartOf"
check.mojibake(field, field_name)
result = check.mojibake(field, field_name)
captured = capsys.readouterr()
assert (
@@ -407,18 +407,18 @@ def test_check_doi_field():
# the citation and a DOI field.
d = {"cg.identifier.doi": doi, "dcterms.bibliographicCitation": citation}
series = pd.Series(data=d)
exclude = []
exclude = list()
result = check.citation_doi(series, exclude)
assert result is None
assert result == None
def test_check_doi_only_in_citation(capsys):
"""Test an item with a DOI in its citation, but no DOI field."""
citation = "Orth, A. 2021. Testing all the things. doi: 10.1186/1743-422X-9-218"
exclude = []
exclude = list()
# Emulate a column in a transposed dataframe (which is just a series), with
# an empty DOI field and a citation containing a DOI.
@@ -439,7 +439,7 @@ def test_title_in_citation():
title = "Testing all the things"
citation = "Orth, A. 2021. Testing all the things."
exclude = []
exclude = list()
# Emulate a column in a transposed dataframe (which is just a series), with
# the title and citation.
@@ -448,7 +448,7 @@ def test_title_in_citation():
result = check.title_in_citation(series, exclude)
assert result is None
assert result == None
def test_title_not_in_citation(capsys):
@@ -456,7 +456,7 @@ def test_title_not_in_citation(capsys):
title = "Testing all the things"
citation = "Orth, A. 2021. Testing all teh things."
exclude = []
exclude = list()
# Emulate a column in a transposed dataframe (which is just a series), with
# the title and citation.
@@ -477,7 +477,7 @@ def test_country_matches_region():
country = "Kenya"
region = "Eastern Africa"
exclude = []
exclude = list()
# Emulate a column in a transposed dataframe (which is just a series)
d = {"cg.coverage.country": country, "cg.coverage.region": region}
@@ -485,7 +485,7 @@ def test_country_matches_region():
result = check.countries_match_regions(series, exclude)
assert result is None
assert result == None
def test_country_not_matching_region(capsys):
@@ -495,7 +495,7 @@ def test_country_not_matching_region(capsys):
country = "Kenya"
region = ""
missing_region = "Eastern Africa"
exclude = []
exclude = list()
# Emulate a column in a transposed dataframe (which is just a series)
d = {
@@ -510,5 +510,5 @@ def test_country_not_matching_region(capsys):
captured = capsys.readouterr()
assert (
captured.out
== f"{Fore.YELLOW}Missing region ({country} → {missing_region}): {Fore.RESET}{title}\n"
== f"{Fore.YELLOW}Missing region ({missing_region}): {Fore.RESET}{title}\n"
)

tests/test_fix.py

@@ -131,7 +131,7 @@ def test_fix_country_not_matching_region():
country = "Kenya"
region = ""
missing_region = "Eastern Africa"
exclude = []
exclude = list()
# Emulate a column in a transposed dataframe (which is just a series)
d = {
@@ -152,11 +152,3 @@ def test_fix_country_not_matching_region():
series_correct = pd.Series(data=d_correct)
pd.testing.assert_series_equal(result, series_correct)
def test_fix_normalize_dois():
"""Test normalizing a DOI."""
value = "doi: 10.11648/j.jps.20140201.14"
assert fix.normalize_dois(value) == "https://doi.org/10.11648/j.jps.20140201.14"