mirror of https://github.com/ilri/csv-metadata-quality.git synced 2025-05-13 16:37:45 +02:00

Commit 0d719cf42a: ".drone.yml: add Python 3.11"
All checks were successful (continuous-integration/drone/push: build is passing)
Committed 2022-10-25 10:47:16 +03:00
22 changed files with 1055 additions and 8117 deletions


@@ -9,24 +9,24 @@ steps:
 commands:
 - id
 - python -V
- - apt update && apt install -y gcc g++ libicu-dev pkg-config git
- - python -m pip install poetry
- - poetry install
- - poetry run pytest
+ - apt update && apt install -y gcc g++ libicu-dev pkg-config
+ - pip install -r requirements-dev.txt
+ - pytest
+ - python setup.py install
 # Basic test
- - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv
+ - csv-metadata-quality -i data/test.csv -o /tmp/test.csv
 # Basic test with unsafe fixes
- - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u
+ - csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u
 # Geography test
- - poetry run csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv
+ - csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv
 # Geography test with unsafe fixes
- - poetry run csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv -u
+ - csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv -u
 # Test with experimental checks
- - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e
+ - csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e
 # Test with AGROVOC validation
- - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject
+ - csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject
 # Test with AGROVOC validation (and dropping invalid)
- - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject -d
+ - csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject -d
 ---
 kind: pipeline
@@ -39,24 +39,24 @@ steps:
 commands:
 - id
 - python -V
- - apt update && apt install -y gcc g++ libicu-dev pkg-config git
- - python -m pip install poetry
- - poetry install
- - poetry run pytest
+ - apt update && apt install -y gcc g++ libicu-dev pkg-config
+ - pip install -r requirements-dev.txt
+ - pytest
+ - python setup.py install
 # Basic test
- - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv
+ - csv-metadata-quality -i data/test.csv -o /tmp/test.csv
 # Basic test with unsafe fixes
- - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u
+ - csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u
 # Geography test
- - poetry run csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv
+ - csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv
 # Geography test with unsafe fixes
- - poetry run csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv -u
+ - csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv -u
 # Test with experimental checks
- - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e
+ - csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e
 # Test with AGROVOC validation
- - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject
+ - csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject
 # Test with AGROVOC validation (and dropping invalid)
- - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject -d
+ - csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject -d
 ---
 kind: pipeline
@@ -69,23 +69,53 @@ steps:
 commands:
 - id
 - python -V
- - apt update && apt install -y gcc g++ libicu-dev pkg-config git
- - python -m pip install poetry
- - poetry install
- - poetry run pytest
+ - apt update && apt install -y gcc g++ libicu-dev pkg-config
+ - pip install -r requirements-dev.txt
+ - pytest
+ - python setup.py install
 # Basic test
- - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv
+ - csv-metadata-quality -i data/test.csv -o /tmp/test.csv
 # Basic test with unsafe fixes
- - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u
+ - csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u
 # Geography test
- - poetry run csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv
+ - csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv
 # Geography test with unsafe fixes
- - poetry run csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv -u
+ - csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv -u
 # Test with experimental checks
- - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e
+ - csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e
 # Test with AGROVOC validation
- - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject
+ - csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject
 # Test with AGROVOC validation (and dropping invalid)
- - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject -d
+ - csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject -d
+ ---
+ kind: pipeline
+ type: docker
+ name: python38
+ steps:
+ - name: test
+ image: python:3.8-slim
+ commands:
+ - id
+ - python -V
+ - apt update && apt install -y gcc g++ libicu-dev pkg-config
+ - pip install -r requirements-dev.txt
+ - pytest
+ - python setup.py install
+ # Basic test
+ - csv-metadata-quality -i data/test.csv -o /tmp/test.csv
+ # Basic test with unsafe fixes
+ - csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u
+ # Geography test
+ - csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv
+ # Geography test with unsafe fixes
+ - csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv -u
+ # Test with experimental checks
+ - csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e
+ # Test with AGROVOC validation
+ - csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject
+ # Test with AGROVOC validation (and dropping invalid)
+ - csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject -d
 # vim: ts=2 sw=2 et


@@ -15,31 +15,37 @@ jobs:
 runs-on: ubuntu-22.04
 steps:
- - uses: actions/checkout@v4
- - name: Install poetry
- run: pipx install poetry
- - uses: actions/setup-python@v4
+ - uses: actions/checkout@v3
+ - name: Set up Python 3.10
+ uses: actions/setup-python@v4
 with:
- python-version: '3.11'
- cache: 'poetry'
- - run: poetry install
+ python-version: '3.10'
+ cache: 'pip'
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install flake8 pytest
+ if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
+ if [ -f requirements-dev.txt ]; then pip install -r requirements-dev.txt; fi
 - name: Lint with flake8
 run: |
 # stop the build if there are Python syntax errors or undefined names
- poetry run flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
+ flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
 # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
- poetry run flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
+ flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
 - name: Test with pytest
- run: poetry run pytest
+ run: |
+ pytest
 - name: Test CLI
 run: |
+ python setup.py install
 # Basic test
- poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv
+ csv-metadata-quality -i data/test.csv -o /tmp/test.csv
 # Test with unsafe fixes
- poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u
+ csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u
 # Test with experimental checks
- poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e
+ csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e
 # Test with AGROVOC validation
- poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject
+ csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject
 # Test with AGROVOC validation (and dropping invalid)
- poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject -d
+ csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject -d


@@ -4,39 +4,7 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
- ## Unreleased
- ### Fixed
- - Fixed regex so we don't run the invalid multi-value separator fix on
-   `dcterms.bibliographicCitation` fields
- - Fixed regex so we run the comma space fix on `dcterms.bibliographicCitation`
-   fields
- - Don't crash the country/region checker/fixer when a title field is missing
- ### Changed
- - Don't run newline fix on description fields
- - Install requests-cache in main run() function instead of check.agrovoc() function so we only incur the overhead once
- ### Updated
- - Python dependencies, including Pandas 2.0.0 and [Arrow-backed dtypes](https://datapythonista.me/blog/pandas-20-and-the-arrow-revolution-part-i)
- ## [0.6.1] - 2023-02-23
- ### Fixed
- - Missing region check should ignore subregion field, if it exists
- ### Changed
- - Use SPDX license data from SPDX themselves instead of spdx-license-list
-   because it is deprecated and outdated
- - Require Python 3.9+
- - Don't run `fix.separators()` on title or abstract fields
- - Don't run whitespace or newline fixes on abstract fields
- - Ignore some common non-SPDX licenses
- - Ignore `__description` suffix in filenames meant for SAFBuilder when checking
-   for uncommon file extensions
- ### Updated
- - Python dependencies
- ## [0.6.0] - 2022-09-02
+ ## [0.6.0] - 2022-09-02
 ### Changed
 - Perform fix for "unnecessary" Unicode characters after we try to fix encoding
   issues with ftfy


@@ -1 +0,0 @@
include csv_metadata_quality/data/licenses.json


@@ -8,7 +8,7 @@
 A simple, but opinionated metadata quality checker and fixer designed to work with CSVs in the DSpace ecosystem (though it could theoretically work on any CSV that uses Dublin Core fields as columns). The implementation is essentially a pipeline of checks and fixes that begins with splitting multi-value fields on the standard DSpace "||" separator, trimming leading/trailing whitespace, and then proceeding to more specialized cases like ISSNs, ISBNs, languages, unnecessary Unicode, AGROVOC terms, etc.
- Requires Python 3.9 or greater. CSV support comes from the [Pandas](https://pandas.pydata.org/) library.
+ Requires Python 3.8 or greater. CSV support comes from the [Pandas](https://pandas.pydata.org/) library.
 If you use the DSpace CSV metadata quality checker please cite:
@@ -127,6 +127,7 @@ This currently uses the [Python langid](https://github.com/saffsd/langid.py) lib
 - Warn if an author is shorter than 3 characters?
 - Validate DOIs? Normalize to https://doi.org format? Or use just the DOI part: 10.1016/j.worlddev.2010.06.006
 - Warn if two items use the same file in `filename` column
+ - Add an option to drop invalid AGROVOC subjects?
 - Add tests for application invocation, ie `tests/test_app.py`?
 - Validate ISSNs or journal titles against CrossRef API?
 - Add configurable field validation, like specify a field name and a validation file?
@@ -136,7 +137,7 @@ This currently uses the [Python langid](https://github.com/saffsd/langid.py) lib
 - Warn if item is Open Access, but missing a license
 - Warn if item has an ISSN but no journal title
 - Update journal titles from ISSN
- - Migrate from Pandas to Polars
+ - Migrate to https://github.com/spdx/license-list-data
 ## License
 This work is licensed under the [GPLv3](https://www.gnu.org/licenses/gpl-3.0.en.html).
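For reference, a minimal standalone sketch of the pipeline idea described in the README hunk above (illustrative only, not the project's own implementation): each fix operates on one cell value, splitting multi-value fields on the DSpace "||" separator, cleaning each value, and rejoining.

# Minimal sketch of one pipeline step (illustrative only, not the project's code)
def strip_whitespace(field):
    """Trim leading/trailing and collapse consecutive whitespace in each value."""
    if field is None:
        return field
    # Multi-value fields use the standard DSpace "||" separator
    values = [" ".join(value.split()) for value in field.split("||")]
    return "||".join(values)

print(strip_whitespace("  Leading space || Trailing  space  "))
# Leading space||Trailing space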


@@ -1,14 +1,11 @@
 # SPDX-License-Identifier: GPL-3.0-only
 import argparse
- import os
 import re
 import signal
 import sys
- from datetime import timedelta
 import pandas as pd
- import requests_cache
 from colorama import Fore
 import csv_metadata_quality.check as check
@@ -77,7 +74,7 @@ def run(argv):
 signal.signal(signal.SIGINT, signal_handler)
 # Read all fields as strings so dates don't get converted from 1998 to 1998.0
- df = pd.read_csv(args.input_file, dtype_backend="pyarrow", dtype="str")
+ df = pd.read_csv(args.input_file, dtype=str)
 # Check if the user requested to skip any fields
 if args.exclude_fields:
@@ -85,20 +82,7 @@ def run(argv):
 # user should be careful to no include spaces here.
 exclude = args.exclude_fields.split(",")
 else:
- exclude = []
+ exclude = list()
- # enable transparent request cache with thirty days expiry
- expire_after = timedelta(days=30)
- # Allow overriding the location of the requests cache, just in case we are
- # running in an environment where we can't write to the current working di-
- # rectory (for example from csv-metadata-quality-web).
- REQUESTS_CACHE_DIR = os.environ.get("REQUESTS_CACHE_DIR", ".")
- requests_cache.install_cache(
- f"{REQUESTS_CACHE_DIR}/agrovoc-response-cache", expire_after=expire_after
- )
- # prune old cache entries
- requests_cache.delete()
 for column in df.columns:
 if column in exclude:
@@ -106,21 +90,17 @@ def run(argv):
 continue
- if args.unsafe_fixes:
- # Skip whitespace and newline fixes on abstracts and descriptions
- # because there are too many with legitimate multi-line metadata.
- match = re.match(r"^.*?(abstract|description).*$", column)
- if match is None:
 # Fix: whitespace
 df[column] = df[column].apply(fix.whitespace, field_name=column)
 # Fix: newlines
+ if args.unsafe_fixes:
 df[column] = df[column].apply(fix.newlines, field_name=column)
 # Fix: missing space after comma. Only run on author and citation
 # fields for now, as this problem is mostly an issue in names.
 if args.unsafe_fixes:
- match = re.match(r"^.*?(author|[Cc]itation).*$", column)
+ match = re.match(r"^.*?(author|citation).*$", column)
 if match is not None:
 df[column] = df[column].apply(fix.comma_space, field_name=column)
@@ -141,11 +121,7 @@ def run(argv):
 # Fix: unnecessary Unicode
 df[column] = df[column].apply(fix.unnecessary_unicode)
- # Fix: invalid and unnecessary multi-value separators. Skip the title
- # and abstract fields because "|" is used to indicate something like
- # a subtitle.
- match = re.match(r"^.*?(abstract|[Cc]itation|title).*$", column)
- if match is None:
+ # Fix: invalid and unnecessary multi-value separators
 df[column] = df[column].apply(fix.separators, field_name=column)
 # Run whitespace fix again after fixing invalid separators
 df[column] = df[column].apply(fix.whitespace, field_name=column)
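The comments in the removed block above describe the transparent request cache. A small standalone sketch of the same pattern, using only the calls that appear in the diff (the example URL and query term are illustrative, and the exact pruning behaviour of requests_cache.delete() depends on the installed requests-cache version):

import os
from datetime import timedelta

import requests
import requests_cache

# Transparent cache with thirty-day expiry, as in the block above. The cache
# location can be overridden with REQUESTS_CACHE_DIR when the working
# directory is not writable (for example from csv-metadata-quality-web).
expire_after = timedelta(days=30)
REQUESTS_CACHE_DIR = os.environ.get("REQUESTS_CACHE_DIR", ".")
requests_cache.install_cache(
    f"{REQUESTS_CACHE_DIR}/agrovoc-response-cache", expire_after=expire_after
)
# Prune old cache entries (requests-cache >= 1.0 API, as used in the diff)
requests_cache.delete()

# Subsequent requests.get() calls are now served from the cache when possible
response = requests.get(
    "https://agrovoc.uniroma2.it/agrovoc/rest/v1/agrovoc/search",
    params={"query": "MAIZE"},
)
print(response.status_code)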


@@ -1,18 +1,21 @@
 # SPDX-License-Identifier: GPL-3.0-only
 import logging
+ import os
 import re
 from datetime import datetime, timedelta
 import country_converter as coco
 import pandas as pd
 import requests
+ import requests_cache
+ import spdx_license_list
 from colorama import Fore
 from pycountry import languages
 from stdnum import isbn as stdnum_isbn
 from stdnum import issn as stdnum_issn
- from csv_metadata_quality.util import is_mojibake, load_spdx_licenses
+ from csv_metadata_quality.util import is_mojibake
 def issn(field):
@@ -31,6 +34,7 @@ def issn(field):
 # Try to split multi-value field on "||" separator
 for value in field.split("||"):
 if not stdnum_issn.is_valid(value):
 print(f"{Fore.RED}Invalid ISSN: {Fore.RESET}{value}")
@@ -53,6 +57,7 @@ def isbn(field):
 # Try to split multi-value field on "||" separator
 for value in field.split("||"):
 if not stdnum_isbn.is_valid(value):
 print(f"{Fore.RED}Invalid ISBN: {Fore.RESET}{value}")
@@ -169,6 +174,7 @@ def language(field):
 # Try to split multi-value field on "||" separator
 for value in field.split("||"):
 # After splitting, check if language value is 2 or 3 characters so we
 # can check it against ISO 639-1 or ISO 639-3 accordingly.
 if len(value) == 2:
@@ -201,12 +207,25 @@ def agrovoc(field, field_name, drop):
 if pd.isna(field):
 return
+ # enable transparent request cache with thirty days expiry
+ expire_after = timedelta(days=30)
+ # Allow overriding the location of the requests cache, just in case we are
+ # running in an environment where we can't write to the current working di-
+ # rectory (for example from csv-metadata-quality-web).
+ REQUESTS_CACHE_DIR = os.environ.get("REQUESTS_CACHE_DIR", ".")
+ requests_cache.install_cache(
+ f"{REQUESTS_CACHE_DIR}/agrovoc-response-cache", expire_after=expire_after
+ )
+ # prune old cache entries
+ # requests_cache.remove_expired_responses()
 # Initialize an empty list to hold the validated AGROVOC values
- values = []
+ values = list()
 # Try to split multi-value field on "||" separator
 for value in field.split("||"):
- request_url = "https://agrovoc.uniroma2.it/agrovoc/rest/v1/agrovoc/search"
+ request_url = "http://agrovoc.uniroma2.it/agrovoc/rest/v1/agrovoc/search"
 request_params = {"query": value}
 request = requests.get(request_url, params=request_params)
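For context, the agrovoc() check in the hunk above queries AGROVOC's REST API with each subject term. A hedged, standalone sketch of that lookup; the response handling assumes the SKOSMOS search endpoint returns a JSON object with a "results" list, which is not shown in this diff:

import requests

# Standalone sketch of the AGROVOC lookup used by check.agrovoc() above.
# Assumption: the /search endpoint returns {"results": [...]}, where an
# empty list means the term was not found in AGROVOC.
request_url = "https://agrovoc.uniroma2.it/agrovoc/rest/v1/agrovoc/search"

for value in "LIVESTOCK||FOREST".split("||"):
    request = requests.get(request_url, params={"query": value})
    if request.ok and len(request.json()["results"]) == 0:
        print(f"Invalid AGROVOC subject: {value}")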
@@ -268,11 +287,6 @@ def filename_extension(field):
 # Iterate over all values
 for value in values:
- # Strip filename descriptions that are meant for SAF Bundler, for
- # example: Annual_Report_2020.pdf__description:Report
- if "__description" in value:
- value = value.split("__")[0]
 # Assume filename extension does not match
 filename_extension_match = False
@@ -299,26 +313,13 @@ def spdx_license_identifier(field):
 Prints the value if it is invalid.
 """
- # List of common non-SPDX licenses to ignore
- # See: https://ilri.github.io/cgspace-submission-guidelines/dcterms-license/dcterms-license.txt
- ignore_licenses = {
- "All rights reserved; no re-use allowed",
- "All rights reserved; self-archive copy only",
- "Copyrighted; Non-commercial educational use only",
- "Copyrighted; Non-commercial use only",
- "Copyrighted; all rights reserved",
- "Other",
- }
 # Skip fields with missing values
- if pd.isna(field) or field in ignore_licenses:
+ if pd.isna(field):
 return
- spdx_licenses = load_spdx_licenses()
 # Try to split multi-value field on "||" separator
 for value in field.split("||"):
- if value not in spdx_licenses:
+ if value not in spdx_license_list.LICENSES:
 print(f"{Fore.YELLOW}Non-SPDX license identifier: {Fore.RESET}{value}")
 return
@@ -358,7 +359,7 @@ def duplicate_items(df):
 if items_count_unique < items_count_total:
 # Create a list to hold our items while we check for duplicates
- items = []
+ items = list()
 for index, row in df.iterrows():
 item_title_type_date = f"{row[title_column_name]}{row[type_column_name]}{row[date_column_name]}"
@@ -511,9 +512,9 @@ def countries_match_regions(row, exclude):
 if match is not None:
 country_column_name = label
- # Find the name of the region column, but make sure it's not subregion!
+ # Find the name of the region column
 match = re.match(r"^.*?region.*$", label)
- if match is not None and "sub" not in label:
+ if match is not None:
 region_column_name = label
 # Find the name of the title column
@@ -539,7 +540,7 @@ def countries_match_regions(row, exclude):
 if row[region_column_name] is not None:
 regions = row[region_column_name].split("||")
 else:
- regions = []
+ regions = list()
 for country in countries:
 # Look up the UN M.49 regions for this country code. CoCo seems to
@@ -548,13 +549,8 @@ def countries_match_regions(row, exclude):
 un_region = cc.convert(names=country, to="UNRegion")
 if un_region != "not found" and un_region not in regions:
- try:
- print(
- f"{Fore.YELLOW}Missing region ({country} → {un_region}): {Fore.RESET}{row[title_column_name]}"
- )
- except KeyError:
- print(
- f"{Fore.YELLOW}Missing region ({country} → {un_region}): {Fore.RESET}<title field not present>"
- )
+ print(
+ f"{Fore.YELLOW}Missing region ({un_region}): {Fore.RESET}{row[title_column_name]}"
+ )
 return
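The countries_match_regions() hunks above rely on country_converter (coco) to map a country to its UN M.49 region. A minimal standalone sketch of that lookup; only the convert() call appears in the diff, so the CountryConverter construction and the sample values are assumptions:

import country_converter as coco

# Sketch of the region lookup used in countries_match_regions() above.
cc = coco.CountryConverter()

regions = []  # e.g. row["cg.coverage.region"].split("||") when the column is present
for country in ["Kenya"]:
    un_region = cc.convert(names=country, to="UNRegion")
    if un_region != "not found" and un_region not in regions:
        # For Kenya this reports its UN region (Eastern Africa in the test data)
        print(f"Missing region ({country} → {un_region})")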

File diff suppressed because it is too large.


@@ -20,7 +20,7 @@ def correct_language(row, exclude):
 # Initialize some variables at global scope so that we can set them in the
 # loop scope below and still be able to access them afterwards.
 language = ""
- sample_strings = []
+ sample_strings = list()
 title = None
 # Iterate over the labels of the current row's values. Before we transposed


@@ -23,7 +23,7 @@ def whitespace(field, field_name):
 return
 # Initialize an empty list to hold the cleaned values
- values = []
+ values = list()
 # Try to split multi-value field on "||" separator
 for value in field.split("||"):
@@ -64,7 +64,7 @@ def separators(field, field_name):
 return
 # Initialize an empty list to hold the cleaned values
- values = []
+ values = list()
 # Try to split multi-value field on "||" separator
 for value in field.split("||"):
@@ -175,7 +175,7 @@ def duplicates(field, field_name):
 values = field.split("||")
 # Initialize an empty list to hold the de-duplicated values
- new_values = []
+ new_values = list()
 # Iterate over all values
 for value in values:
@@ -327,9 +327,9 @@ def countries_match_regions(row, exclude):
 if match is not None:
 country_column_name = label
- # Find the name of the region column, but make sure it's not subregion!
+ # Find the name of the region column
 match = re.match(r"^.*?region.*$", label)
- if match is not None and "sub" not in label:
+ if match is not None:
 region_column_name = label
 # Find the name of the title column
@@ -355,10 +355,10 @@ def countries_match_regions(row, exclude):
 if row[region_column_name] is not None:
 regions = row[region_column_name].split("||")
 else:
- regions = []
+ regions = list()
 # An empty list for our regions so we can keep track for all countries
- missing_regions = []
+ missing_regions = list()
 for country in countries:
 # Look up the UN M.49 regions for this country code. CoCo seems to
@@ -370,17 +370,9 @@ def countries_match_regions(row, exclude):
 # it doesn't already exist in regions.
 if un_region != "not found" and un_region not in regions:
 if un_region not in missing_regions:
- try:
- print(
- f"{Fore.YELLOW}Adding missing region ({un_region}): {Fore.RESET}{row[title_column_name]}"
- )
- except KeyError:
- # If there is no title column in the CSV we will print
- # the fix without the title instead of crashing.
- print(
- f"{Fore.YELLOW}Adding missing region ({un_region}): {Fore.RESET}<title field not present>"
- )
+ print(
+ f"{Fore.YELLOW}Adding missing region ({un_region}): {Fore.RESET}{row[title_column_name]}"
+ )
 missing_regions.append(un_region)
 if len(missing_regions) > 0:


@@ -1,9 +1,5 @@
 # SPDX-License-Identifier: GPL-3.0-only
- import json
- import os
 from ftfy.badness import is_bad
@@ -53,13 +49,3 @@ def is_mojibake(field):
 else:
 # Encodable as CP-1252, Mojibake alert level high
 return True
- def load_spdx_licenses():
- """Returns a Python list of SPDX short license identifiers."""
- with open(os.path.join(os.path.dirname(__file__), "data/licenses.json")) as f:
- licenses = json.load(f)
- # List comprehension to extract the license ID for each license
- return [license["licenseId"] for license in licenses["licenses"]]
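The load_spdx_licenses() helper removed above reads the SPDX licenses.json data file. A small sketch of the structure it expects; this is a trimmed, illustrative subset of the SPDX license-list-data format, not the full file:

import json

# Trimmed, illustrative example of the licenses.json structure that
# load_spdx_licenses() above expects.
licenses_json = """
{
  "licenses": [
    {"licenseId": "CC-BY-4.0"},
    {"licenseId": "GPL-3.0-only"}
  ]
}
"""
licenses = json.loads(licenses_json)
print([license["licenseId"] for license in licenses["licenses"]])
# ['CC-BY-4.0', 'GPL-3.0-only']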


@@ -1,3 +1,3 @@
 # SPDX-License-Identifier: GPL-3.0-only
- VERSION = "0.6.1"
+ VERSION = "0.6.0"


@@ -1,17 +0,0 @@
id,dc.title,dcterms.abstract
1,Normal item,This is an abstract
2,Leading whitespace, This is an abstract
3,Trailing whitespace,This is an abstract
4,Consecutive whitespace,This is an abstract
5,Newline,"This
is an abstract"
6,Newline with leading whitespace," This
is an abstract"
7,Newline with trailing whitespace,"This
is an abstract "
8,Newline with consecutive whitespace,"This
is an abstract"
9,Multiple newlines,"This
is
an
abstract"


@@ -1,39 +1,38 @@
dc.title,dcterms.issued,dc.identifier.issn,dc.identifier.isbn,dcterms.language,dcterms.subject,cg.coverage.country,filename,dcterms.license,dcterms.type,dcterms.bibliographicCitation,cg.identifier.doi,cg.coverage.region,cg.coverage.subregion dc.title,dcterms.issued,dc.identifier.issn,dc.identifier.isbn,dcterms.language,dcterms.subject,cg.coverage.country,filename,dcterms.license,dcterms.type,dcterms.bibliographicCitation,cg.identifier.doi,cg.coverage.region
Leading space,2019-07-29,,,,,,,,,,,, Leading space,2019-07-29,,,,,,,,,,,
Trailing space ,2019-07-29,,,,,,,,,,,, Trailing space ,2019-07-29,,,,,,,,,,,
Excessive space,2019-07-29,,,,,,,,,,,, Excessive space,2019-07-29,,,,,,,,,,,
Miscellaenous ||whitespace | issues ,2019-07-29,,,,,,,,,,,, Miscellaenous ||whitespace | issues ,2019-07-29,,,,,,,,,,,
Duplicate||Duplicate,2019-07-29,,,,,,,,,,,, Duplicate||Duplicate,2019-07-29,,,,,,,,,,,
Invalid ISSN,2019-07-29,2321-2302,,,,,,,,,,, Invalid ISSN,2019-07-29,2321-2302,,,,,,,,,,
Invalid ISBN,2019-07-29,,978-0-306-40615-6,,,,,,,,,, Invalid ISBN,2019-07-29,,978-0-306-40615-6,,,,,,,,,
Multiple valid ISSNs,2019-07-29,0378-5955||0024-9319,,,,,,,,,,, Multiple valid ISSNs,2019-07-29,0378-5955||0024-9319,,,,,,,,,,
Multiple valid ISBNs,2019-07-29,,99921-58-10-7||978-0-306-40615-7,,,,,,,,,, Multiple valid ISBNs,2019-07-29,,99921-58-10-7||978-0-306-40615-7,,,,,,,,,
Invalid date,2019-07-260,,,,,,,,,,,, Invalid date,2019-07-260,,,,,,,,,,,
Multiple dates,2019-07-26||2019-01-10,,,,,,,,,,,, Multiple dates,2019-07-26||2019-01-10,,,,,,,,,,,
Invalid multi-value separator,2019-07-29,0378-5955|0024-9319,,,,,,,,,,, Invalid multi-value separator,2019-07-29,0378-5955|0024-9319,,,,,,,,,,
Unnecessary Unicode,2019-07-29,,,,,,,,,,,, Unnecessary Unicode,2019-07-29,,,,,,,,,,,
Suspicious character||foreˆt,2019-07-29,,,,,,,,,,,, Suspicious character||foreˆt,2019-07-29,,,,,,,,,,,
Invalid ISO 639-1 (alpha 2) language,2019-07-29,,,jp,,,,,,,,, Invalid ISO 639-1 (alpha 2) language,2019-07-29,,,jp,,,,,,,,
Invalid ISO 639-3 (alpha 3) language,2019-07-29,,,chi,,,,,,,,, Invalid ISO 639-3 (alpha 3) language,2019-07-29,,,chi,,,,,,,,
Invalid language,2019-07-29,,,Span,,,,,,,,, Invalid language,2019-07-29,,,Span,,,,,,,,
Invalid AGROVOC subject,2019-07-29,,,,LIVESTOCK||FOREST,,,,,,,, Invalid AGROVOC subject,2019-07-29,,,,LIVESTOCK||FOREST,,,,,,,
Newline (LF),2019-07-30,,,,"TANZA Newline (LF),2019-07-30,,,,"TANZA
NIA",,,,,,,, NIA",,,,,,,
Missing date,,,,,,,,,,,,, Missing date,,,,,,,,,,,,
Invalid country,2019-08-01,,,,,KENYAA,,,,,,, Invalid country,2019-08-01,,,,,KENYAA,,,,,,
Uncommon filename extension,2019-08-10,,,,,,file.pdf.lck,,,,,, Uncommon filename extension,2019-08-10,,,,,,file.pdf.lck,,,,,
Unneccesary unicode (U+002D + U+00AD),2019-08-10,,978-­92-­9043-­823-­6,,,,,,,,,, Unneccesary unicode (U+002D + U+00AD),2019-08-10,,978-­92-­9043-­823-­6,,,,,,,,,
"Missing space,after comma",2019-08-27,,,,,,,,,,,, "Missing space,after comma",2019-08-27,,,,,,,,,,,
Incorrect ISO 639-1 language,2019-09-26,,,es,,,,,,,,, Incorrect ISO 639-1 language,2019-09-26,,,es,,,,,,,,
Incorrect ISO 639-3 language,2019-09-26,,,spa,,,,,,,,, Incorrect ISO 639-3 language,2019-09-26,,,spa,,,,,,,,
Composéd Unicode,2020-01-14,,,,,,,,,,,, Composéd Unicode,2020-01-14,,,,,,,,,,,
Decomposéd Unicode,2020-01-14,,,,,,,,,,,, Decomposéd Unicode,2020-01-14,,,,,,,,,,,
Unnecessary multi-value separator,2021-01-03,0378-5955||,,,,,,,,,,, Unnecessary multi-value separator,2021-01-03,0378-5955||,,,,,,,,,,
Invalid SPDX license identifier,2021-03-11,,,,,,,CC-BY,,,,, Invalid SPDX license identifier,2021-03-11,,,,,,,CC-BY,,,,
Duplicate Title,2021-03-17,,,,,,,,Report,,,, Duplicate Title,2021-03-17,,,,,,,,Report,,,
Duplicate Title,2021-03-17,,,,,,,,Report,,,, Duplicate Title,2021-03-17,,,,,,,,Report,,,
Mojibake,2021-03-18,,,,Publicaçao CIAT,,,,Report,,,, Mojibake,2021-03-18,,,,Publicaçao CIAT,,,,Report,,,
"DOI in citation, but missing cg.identifier.doi",2021-10-06,,,,,,,,,"Orth, A. 2021. DOI in citation, but missing cg.identifier.doi. doi: 10.1186/1743-422X-9-218",,, "DOI in citation, but missing cg.identifier.doi",2021-10-06,,,,,,,,,"Orth, A. 2021. DOI in citation, but missing cg.identifier.doi. doi: 10.1186/1743-422X-9-218",,
Title missing from citation,2021-12-05,,,,,,,,,"Orth, A. 2021. Title missing f rom citation.",,, Title missing from citation,2021-12-05,,,,,,,,,"Orth, A. 2021. Title missing f rom citation.",,
Country missing region,2021-12-08,,,,,Kenya,,,,,,, Country missing region,2021-12-08,,,,,Kenya,,,,,,
Subregion field shouldnt trigger region checks,2022-12-07,,,,,Kenya,,,,,,Eastern Africa,Baringo


poetry.lock (generated): file diff suppressed because it is too large.


@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "csv-metadata-quality"
- version = "0.6.1"
+ version = "0.6.0"
 description="A simple, but opinionated CSV quality checking and fixing pipeline for CSVs in the DSpace ecosystem."
 authors = ["Alan Orth <alan.orth@gmail.com>"]
 license="GPL-3.0-only"
@@ -11,31 +11,26 @@ homepage = "https://github.com/ilri/csv-metadata-quality"
 csv-metadata-quality = 'csv_metadata_quality.__main__:main'
 [tool.poetry.dependencies]
- python = "^3.9"
- pandas = {version = "^2.0.2", extras = ["feather", "performance"]}
- python-stdnum = "^1.18"
- requests = "^2.28.2"
- requests-cache = "^1.0.0"
+ python = "^3.8"
+ pandas = "^1.4.0"
+ python-stdnum = "^1.13"
+ requests = "^2.28.1"
+ requests-cache = "^0.9.6"
+ pycountry = "^22.3.5"
 langid = "^1.1.6"
- colorama = "^0.4.6"
+ colorama = "^0.4.5"
+ spdx-license-list = "^0.5.2"
 ftfy = "^6.1.1"
- country-converter = "~1.1.0"
- pycountry = {git = "https://github.com/alanorth/pycountry", rev = "iso-codes-4.15.0"}
+ country-converter = "^0.7.7"
- [tool.poetry.group.dev.dependencies]
- pytest = "^7.2.1"
- flake8 = "^6.0.0"
+ [tool.poetry.dev-dependencies]
+ pytest = "^7.1.3"
+ flake8 = "^5.0.4"
 pytest-clarity = "^1.0.1"
- black = "^23.1.0"
- isort = "^5.12.0"
- csvkit = "^1.1.0"
- ipython = "^8.10.0"
- fixit = "^2.1.0"
+ black = "^22.8.0"
+ isort = "^5.10.1"
+ csvkit = "^1.0.7"
 [build-system]
 requires = ["poetry>=0.12"]
 build-backend = "poetry.masonry.api"
- [tool.isort]
- profile = "black"
- line_length=88


@@ -1,9 +0,0 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"config:base"
],
"pip_requirements": {
"enabled": false
}
}


@@ -1,82 +1,68 @@
agate-dbf==0.2.2 ; python_version >= "3.9" and python_version < "4.0" agate-dbf==0.2.2 ; python_version >= "3.8" and python_version < "4.0"
agate-excel==0.2.5 ; python_version >= "3.9" and python_version < "4.0" agate-excel==0.2.5 ; python_version >= "3.8" and python_version < "4.0"
agate-sql==0.5.9 ; python_version >= "3.9" and python_version < "4.0" agate-sql==0.5.8 ; python_version >= "3.8" and python_version < "4.0"
agate==1.7.1 ; python_version >= "3.9" and python_version < "4.0" agate==1.6.3 ; python_version >= "3.8" and python_version < "4.0"
appdirs==1.4.4 ; python_version >= "3.9" and python_version < "4.0" appdirs==1.4.4 ; python_version >= "3.8" and python_version < "4.0"
appnope==0.1.3 ; python_version >= "3.9" and python_version < "4.0" and sys_platform == "darwin" attrs==22.1.0 ; python_version >= "3.8" and python_version < "4.0"
asttokens==2.2.1 ; python_version >= "3.9" and python_version < "4.0" babel==2.10.3 ; python_version >= "3.8" and python_version < "4.0"
attrs==23.1.0 ; python_version >= "3.9" and python_version < "4.0" black==22.8.0 ; python_version >= "3.8" and python_version < "4.0"
babel==2.12.1 ; python_version >= "3.9" and python_version < "4.0" cattrs==22.1.0 ; python_version >= "3.8" and python_version < "4.0"
backcall==0.2.0 ; python_version >= "3.9" and python_version < "4.0" certifi==2022.6.15 ; python_version >= "3.8" and python_version < "4"
black==23.3.0 ; python_version >= "3.9" and python_version < "4.0" charset-normalizer==2.1.1 ; python_version >= "3.8" and python_version < "4"
cattrs==22.2.0 ; python_version >= "3.9" and python_version < "4.0" click==8.1.3 ; python_version >= "3.8" and python_version < "4.0"
certifi==2022.12.7 ; python_version >= "3.9" and python_version < "4.0" colorama==0.4.5 ; python_version >= "3.8" and python_version < "4.0"
charset-normalizer==3.1.0 ; python_version >= "3.9" and python_version < "4.0" commonmark==0.9.1 ; python_version >= "3.8" and python_version < "4.0"
click==8.1.3 ; python_version >= "3.9" and python_version < "4.0" country-converter==0.7.7 ; python_version >= "3.8" and python_version < "4.0"
colorama==0.4.6 ; python_version >= "3.9" and python_version < "4.0" csvkit==1.0.7 ; python_version >= "3.8" and python_version < "4.0"
country-converter==1.0.0 ; python_version >= "3.9" and python_version < "4.0" dbfread==2.0.7 ; python_version >= "3.8" and python_version < "4.0"
csvkit==1.1.1 ; python_version >= "3.9" and python_version < "4.0" et-xmlfile==1.1.0 ; python_version >= "3.8" and python_version < "4.0"
dbfread==2.0.7 ; python_version >= "3.9" and python_version < "4.0" exceptiongroup==1.0.0rc9 ; python_version >= "3.8" and python_version <= "3.10"
decorator==5.1.1 ; python_version >= "3.9" and python_version < "4.0" flake8==5.0.4 ; python_version >= "3.8" and python_version < "4.0"
et-xmlfile==1.1.0 ; python_version >= "3.9" and python_version < "4.0" ftfy==6.1.1 ; python_version >= "3.8" and python_version < "4"
exceptiongroup==1.1.1 ; python_version >= "3.9" and python_version < "3.11" future==0.18.2 ; python_version >= "3.8" and python_version < "4.0"
executing==1.2.0 ; python_version >= "3.9" and python_version < "4.0" greenlet==1.1.3 ; python_version >= "3.8" and (platform_machine == "aarch64" or platform_machine == "ppc64le" or platform_machine == "x86_64" or platform_machine == "amd64" or platform_machine == "AMD64" or platform_machine == "win32" or platform_machine == "WIN32") and python_version < "4.0"
flake8==6.0.0 ; python_version >= "3.9" and python_version < "4.0" idna==3.3 ; python_version >= "3.8" and python_version < "4"
ftfy==6.1.1 ; python_version >= "3.9" and python_version < "4" iniconfig==1.1.1 ; python_version >= "3.8" and python_version < "4.0"
greenlet==2.0.2 ; python_version >= "3.9" and platform_machine == "aarch64" and python_version < "4.0" or python_version >= "3.9" and platform_machine == "ppc64le" and python_version < "4.0" or python_version >= "3.9" and platform_machine == "x86_64" and python_version < "4.0" or python_version >= "3.9" and platform_machine == "amd64" and python_version < "4.0" or python_version >= "3.9" and platform_machine == "AMD64" and python_version < "4.0" or python_version >= "3.9" and platform_machine == "win32" and python_version < "4.0" or python_version >= "3.9" and platform_machine == "WIN32" and python_version < "4.0" isodate==0.6.1 ; python_version >= "3.8" and python_version < "4.0"
idna==3.4 ; python_version >= "3.9" and python_version < "4.0" isort==5.10.1 ; python_version >= "3.8" and python_version < "4.0"
iniconfig==2.0.0 ; python_version >= "3.9" and python_version < "4.0" langid==1.1.6 ; python_version >= "3.8" and python_version < "4.0"
ipython==8.13.1 ; python_version >= "3.9" and python_version < "4.0" leather==0.3.4 ; python_version >= "3.8" and python_version < "4.0"
isodate==0.6.1 ; python_version >= "3.9" and python_version < "4.0" mccabe==0.7.0 ; python_version >= "3.8" and python_version < "4.0"
isort==5.12.0 ; python_version >= "3.9" and python_version < "4.0" mypy-extensions==0.4.3 ; python_version >= "3.8" and python_version < "4.0"
jedi==0.18.2 ; python_version >= "3.9" and python_version < "4.0" numpy==1.23.2 ; python_version < "4.0" and python_version >= "3.8"
langid==1.1.6 ; python_version >= "3.9" and python_version < "4.0" olefile==0.46 ; python_version >= "3.8" and python_version < "4.0"
leather==0.3.4 ; python_version >= "3.9" and python_version < "4.0" openpyxl==3.0.10 ; python_version >= "3.8" and python_version < "4.0"
markdown-it-py==2.2.0 ; python_version >= "3.9" and python_version < "4.0" packaging==21.3 ; python_version >= "3.8" and python_version < "4.0"
matplotlib-inline==0.1.6 ; python_version >= "3.9" and python_version < "4.0" pandas==1.4.4 ; python_version >= "3.8" and python_version < "4.0"
mccabe==0.7.0 ; python_version >= "3.9" and python_version < "4.0" parsedatetime==2.4 ; python_version >= "3.8" and python_version < "4.0"
mdurl==0.1.2 ; python_version >= "3.9" and python_version < "4.0" pathspec==0.10.1 ; python_version >= "3.8" and python_version < "4.0"
mypy-extensions==1.0.0 ; python_version >= "3.9" and python_version < "4.0" platformdirs==2.5.2 ; python_version >= "3.8" and python_version < "4.0"
numpy==1.24.3 ; python_version >= "3.9" and python_version < "4.0" pluggy==1.0.0 ; python_version >= "3.8" and python_version < "4.0"
olefile==0.46 ; python_version >= "3.9" and python_version < "4.0" pprintpp==0.4.0 ; python_version >= "3.8" and python_version < "4.0"
openpyxl==3.1.2 ; python_version >= "3.9" and python_version < "4.0" py==1.11.0 ; python_version >= "3.8" and python_version < "4.0"
packaging==23.1 ; python_version >= "3.9" and python_version < "4.0" pycodestyle==2.9.1 ; python_version >= "3.8" and python_version < "4.0"
pandas==2.0.1 ; python_version >= "3.9" and python_version < "4.0" pycountry==22.3.5 ; python_version >= "3.8" and python_version < "4"
parsedatetime==2.6 ; python_version >= "3.9" and python_version < "4.0" pyflakes==2.5.0 ; python_version >= "3.8" and python_version < "4.0"
parso==0.8.3 ; python_version >= "3.9" and python_version < "4.0" pygments==2.13.0 ; python_version >= "3.8" and python_version < "4.0"
pathspec==0.11.1 ; python_version >= "3.9" and python_version < "4.0" pyparsing==3.0.9 ; python_version >= "3.8" and python_version < "4.0"
pexpect==4.8.0 ; python_version >= "3.9" and python_version < "4.0" and sys_platform != "win32" pytest-clarity==1.0.1 ; python_version >= "3.8" and python_version < "4.0"
pickleshare==0.7.5 ; python_version >= "3.9" and python_version < "4.0" pytest==7.1.3 ; python_version >= "3.8" and python_version < "4.0"
platformdirs==3.5.0 ; python_version >= "3.9" and python_version < "4.0" python-dateutil==2.8.2 ; python_version >= "3.8" and python_version < "4.0"
pluggy==1.0.0 ; python_version >= "3.9" and python_version < "4.0" python-slugify==6.1.2 ; python_version >= "3.8" and python_version < "4.0"
pprintpp==0.4.0 ; python_version >= "3.9" and python_version < "4.0" python-stdnum==1.17 ; python_version >= "3.8" and python_version < "4.0"
prompt-toolkit==3.0.38 ; python_version >= "3.9" and python_version < "4.0" pytimeparse==1.1.8 ; python_version >= "3.8" and python_version < "4.0"
ptyprocess==0.7.0 ; python_version >= "3.9" and python_version < "4.0" and sys_platform != "win32" pytz==2022.2.1 ; python_version >= "3.8" and python_version < "4.0"
pure-eval==0.2.2 ; python_version >= "3.9" and python_version < "4.0" requests-cache==0.9.6 ; python_version >= "3.8" and python_version < "4.0"
pyarrow==11.0.0 ; python_version >= "3.9" and python_version < "4.0" requests==2.28.1 ; python_version >= "3.8" and python_version < "4"
pycodestyle==2.10.0 ; python_version >= "3.9" and python_version < "4.0" rich==12.5.1 ; python_version >= "3.8" and python_version < "4.0"
pycountry @ git+https://github.com/alanorth/pycountry@iso-codes-4.13.0 ; python_version >= "3.9" and python_version < "4.0" setuptools==65.3.0 ; python_version >= "3.8" and python_version < "4"
pyflakes==3.0.1 ; python_version >= "3.9" and python_version < "4.0" six==1.16.0 ; python_version >= "3.8" and python_version < "4.0"
pygments==2.15.1 ; python_version >= "3.9" and python_version < "4.0" spdx-license-list==0.5.2 ; python_version >= "3.8" and python_version < "4.0"
pytest-clarity==1.0.1 ; python_version >= "3.9" and python_version < "4.0" sqlalchemy==1.4.40 ; python_version >= "3.8" and python_version < "4.0"
pytest==7.3.1 ; python_version >= "3.9" and python_version < "4.0" text-unidecode==1.3 ; python_version >= "3.8" and python_version < "4.0"
python-dateutil==2.8.2 ; python_version >= "3.9" and python_version < "4.0" tomli==2.0.1 ; python_version >= "3.8" and python_version < "4.0"
python-slugify==8.0.1 ; python_version >= "3.9" and python_version < "4.0" typing-extensions==4.3.0 ; python_version >= "3.8" and python_version < "3.10"
python-stdnum==1.18 ; python_version >= "3.9" and python_version < "4.0" url-normalize==1.4.3 ; python_version >= "3.8" and python_version < "4.0"
pytimeparse==1.1.8 ; python_version >= "3.9" and python_version < "4.0" urllib3==1.26.12 ; python_version >= "3.8" and python_version < "4"
pytz==2023.3 ; python_version >= "3.9" and python_version < "4.0" wcwidth==0.2.5 ; python_version >= "3.8" and python_version < "4"
requests-cache==0.9.8 ; python_version >= "3.9" and python_version < "4.0" xlrd==2.0.1 ; python_version >= "3.8" and python_version < "4.0"
requests==2.29.0 ; python_version >= "3.9" and python_version < "4.0"
rich==13.3.5 ; python_version >= "3.9" and python_version < "4.0"
six==1.16.0 ; python_version >= "3.9" and python_version < "4.0"
sqlalchemy==1.4.48 ; python_version >= "3.9" and python_version < "4.0"
stack-data==0.6.2 ; python_version >= "3.9" and python_version < "4.0"
text-unidecode==1.3 ; python_version >= "3.9" and python_version < "4.0"
tomli==2.0.1 ; python_version >= "3.9" and python_version < "3.11"
traitlets==5.9.0 ; python_version >= "3.9" and python_version < "4.0"
typing-extensions==4.5.0 ; python_version >= "3.9" and python_version < "3.10"
tzdata==2023.3 ; python_version >= "3.9" and python_version < "4.0"
url-normalize==1.4.3 ; python_version >= "3.9" and python_version < "4.0"
urllib3==1.26.15 ; python_version >= "3.9" and python_version < "4.0"
wcwidth==0.2.6 ; python_version >= "3.9" and python_version < "4"
xlrd==2.0.1 ; python_version >= "3.9" and python_version < "4.0"


@@ -1,25 +1,25 @@
- appdirs==1.4.4 ; python_version >= "3.9" and python_version < "4.0"
- attrs==23.1.0 ; python_version >= "3.9" and python_version < "4.0"
- cattrs==22.2.0 ; python_version >= "3.9" and python_version < "4.0"
- certifi==2022.12.7 ; python_version >= "3.9" and python_version < "4.0"
- charset-normalizer==3.1.0 ; python_version >= "3.9" and python_version < "4.0"
- colorama==0.4.6 ; python_version >= "3.9" and python_version < "4.0"
- country-converter==1.0.0 ; python_version >= "3.9" and python_version < "4.0"
- exceptiongroup==1.1.1 ; python_version >= "3.9" and python_version < "3.11"
- ftfy==6.1.1 ; python_version >= "3.9" and python_version < "4"
- idna==3.4 ; python_version >= "3.9" and python_version < "4.0"
- langid==1.1.6 ; python_version >= "3.9" and python_version < "4.0"
- numpy==1.24.3 ; python_version >= "3.9" and python_version < "4.0"
- pandas==2.0.1 ; python_version >= "3.9" and python_version < "4.0"
- pyarrow==11.0.0 ; python_version >= "3.9" and python_version < "4.0"
- pycountry @ git+https://github.com/alanorth/pycountry@iso-codes-4.13.0 ; python_version >= "3.9" and python_version < "4.0"
- python-dateutil==2.8.2 ; python_version >= "3.9" and python_version < "4.0"
- python-stdnum==1.18 ; python_version >= "3.9" and python_version < "4.0"
- pytz==2023.3 ; python_version >= "3.9" and python_version < "4.0"
- requests-cache==0.9.8 ; python_version >= "3.9" and python_version < "4.0"
- requests==2.29.0 ; python_version >= "3.9" and python_version < "4.0"
- six==1.16.0 ; python_version >= "3.9" and python_version < "4.0"
- tzdata==2023.3 ; python_version >= "3.9" and python_version < "4.0"
- url-normalize==1.4.3 ; python_version >= "3.9" and python_version < "4.0"
- urllib3==1.26.15 ; python_version >= "3.9" and python_version < "4.0"
- wcwidth==0.2.6 ; python_version >= "3.9" and python_version < "4"
+ appdirs==1.4.4 ; python_version >= "3.8" and python_version < "4.0"
+ attrs==22.1.0 ; python_version >= "3.8" and python_version < "4.0"
+ cattrs==22.1.0 ; python_version >= "3.8" and python_version < "4.0"
+ certifi==2022.6.15 ; python_version >= "3.8" and python_version < "4"
+ charset-normalizer==2.1.1 ; python_version >= "3.8" and python_version < "4"
+ colorama==0.4.5 ; python_version >= "3.8" and python_version < "4.0"
+ country-converter==0.7.7 ; python_version >= "3.8" and python_version < "4.0"
+ exceptiongroup==1.0.0rc9 ; python_version >= "3.8" and python_version <= "3.10"
+ ftfy==6.1.1 ; python_version >= "3.8" and python_version < "4"
+ idna==3.3 ; python_version >= "3.8" and python_version < "4"
+ langid==1.1.6 ; python_version >= "3.8" and python_version < "4.0"
+ numpy==1.23.2 ; python_version < "4.0" and python_version >= "3.8"
+ pandas==1.4.4 ; python_version >= "3.8" and python_version < "4.0"
+ pycountry==22.3.5 ; python_version >= "3.8" and python_version < "4"
+ python-dateutil==2.8.2 ; python_version >= "3.8" and python_version < "4.0"
+ python-stdnum==1.17 ; python_version >= "3.8" and python_version < "4.0"
+ pytz==2022.2.1 ; python_version >= "3.8" and python_version < "4.0"
+ requests-cache==0.9.6 ; python_version >= "3.8" and python_version < "4.0"
+ requests==2.28.1 ; python_version >= "3.8" and python_version < "4"
+ setuptools==65.3.0 ; python_version >= "3.8" and python_version < "4"
+ six==1.16.0 ; python_version >= "3.8" and python_version < "4.0"
+ spdx-license-list==0.5.2 ; python_version >= "3.8" and python_version < "4.0"
+ url-normalize==1.4.3 ; python_version >= "3.8" and python_version < "4.0"
+ urllib3==1.26.12 ; python_version >= "3.8" and python_version < "4"
+ wcwidth==0.2.5 ; python_version >= "3.8" and python_version < "4"

setup.cfg (new file)

@@ -0,0 +1,6 @@
[isort]
multi_line_output=3
include_trailing_comma=True
force_grid_wrap=0
use_parentheses=True
line_length=88

setup.py (new file)

@@ -0,0 +1,37 @@
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
install_requires = [
"pandas",
"python-stdnum",
"requests",
"requests-cache",
"pycountry",
"langid",
]
setuptools.setup(
name="csv-metadata-quality",
version="0.6.0",
author="Alan Orth",
author_email="aorth@mjanja.ch",
description="A simple, but opinionated CSV quality checking and fixing pipeline for CSVs in the DSpace ecosystem.",
license="GPLv3",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/alanorth/csv-metadata-quality",
classifiers=[
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Operating System :: OS Independent",
],
packages=["csv_metadata_quality"],
entry_points={
"console_scripts": ["csv-metadata-quality = csv_metadata_quality.__main__:main"]
},
install_requires=install_requires,
)


@@ -25,7 +25,7 @@ def test_check_valid_issn():
 result = check.issn(value)
- assert result is None
+ assert result == None
 def test_check_invalid_isbn(capsys):
@@ -46,7 +46,7 @@ def test_check_valid_isbn():
 result = check.isbn(value)
- assert result is None
+ assert result == None
 def test_check_missing_date(capsys):
@@ -102,7 +102,7 @@ def test_check_valid_date():
 result = check.date(value, field_name)
- assert result is None
+ assert result == None
 def test_check_suspicious_characters(capsys):
@@ -128,7 +128,7 @@ def test_check_valid_iso639_1_language():
 result = check.language(value)
- assert result is None
+ assert result == None
 def test_check_valid_iso639_3_language():
@@ -138,7 +138,7 @@ def test_check_valid_iso639_3_language():
 result = check.language(value)
- assert result is None
+ assert result == None
 def test_check_invalid_iso639_1_language(capsys):
@@ -249,7 +249,7 @@ def test_check_common_filename_extension():
 result = check.filename_extension(value)
- assert result is None
+ assert result == None
 def test_check_incorrect_iso_639_1_language(capsys):
@@ -305,7 +305,7 @@ def test_check_correct_iso_639_1_language():
 result = experimental.correct_language(series, exclude)
- assert result is None
+ assert result == None
 def test_check_correct_iso_639_3_language():
@@ -321,7 +321,7 @@ def test_check_correct_iso_639_3_language():
 result = experimental.correct_language(series, exclude)
- assert result is None
+ assert result == None
 def test_check_valid_spdx_license_identifier():
@@ -331,7 +331,7 @@ def test_check_valid_spdx_license_identifier():
 result = check.spdx_license_identifier(license)
- assert result is None
+ assert result == None
 def test_check_invalid_spdx_license_identifier(capsys):
@@ -339,7 +339,7 @@ def test_check_invalid_spdx_license_identifier(capsys):
 license = "CC-BY-SA"
- check.spdx_license_identifier(license)
+ result = check.spdx_license_identifier(license)
 captured = capsys.readouterr()
 assert (
@@ -362,7 +362,7 @@ def test_check_duplicate_item(capsys):
 }
 df = pd.DataFrame(data=d)
- check.duplicate_items(df)
+ result = check.duplicate_items(df)
 captured = capsys.readouterr()
 assert (
@@ -379,7 +379,7 @@ def test_check_no_mojibake():
 result = check.mojibake(field, field_name)
- assert result is None
+ assert result == None
 def test_check_mojibake(capsys):
@@ -388,7 +388,7 @@ def test_check_mojibake(capsys):
 field = "CIAT Publicaçao"
 field_name = "dcterms.isPartOf"
- check.mojibake(field, field_name)
+ result = check.mojibake(field, field_name)
 captured = capsys.readouterr()
 assert (
@@ -411,7 +411,7 @@ def test_check_doi_field():
 result = check.citation_doi(series, exclude)
- assert result is None
+ assert result == None
 def test_check_doi_only_in_citation(capsys):
@@ -448,7 +448,7 @@ def test_title_in_citation():
 result = check.title_in_citation(series, exclude)
- assert result is None
+ assert result == None
 def test_title_not_in_citation(capsys):
@@ -485,7 +485,7 @@ def test_country_matches_region():
 result = check.countries_match_regions(series, exclude)
- assert result is None
+ assert result == None
 def test_country_not_matching_region(capsys):
@@ -510,5 +510,5 @@ def test_country_not_matching_region(capsys):
 captured = capsys.readouterr()
 assert (
 captured.out
- == f"{Fore.YELLOW}Missing region ({country} → {missing_region}): {Fore.RESET}{title}\n"
+ == f"{Fore.YELLOW}Missing region ({missing_region}): {Fore.RESET}{title}\n"
 )
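A short note on the assertion style that differs throughout the test hunks above: the "-" side uses identity comparison, which is what flake8 expects, while the "+" side compares with "==". A tiny illustrative snippet of the difference:

# Why "is None" is preferred over "== None": flake8 flags equality
# comparison with None (rule E711); identity comparison is the idiomatic check.
result = None
assert result is None   # preferred, and what flake8 expects
assert result == None   # works at runtime, but triggers flake8 E711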