Mirror of https://github.com/ilri/csv-metadata-quality.git (synced 2025-05-09 22:56:01 +02:00)

Compare commits: v0.2.2...2341c56c40 (471 commits)
.build.yml (deleted, 19 lines)
@@ -1,19 +0,0 @@
-image: archlinux
-packages:
-  - python-pipenv
-sources:
-  - https://git.sr.ht/~alanorth/csv-metadata-quality
-tasks:
-  - setup: |
-      cd csv-metadata-quality
-      pipenv install --dev
-  - pytest: |
-      cd csv-metadata-quality
-      pipenv run pytest
-  - testcli: |
-      cd csv-metadata-quality
-      pipenv run pip install .
-      pipenv run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u --agrovoc-fields dc.subject,cg.coverage.country
-environment:
-  PIPENV_NOSPIN: 'True'
-  PIPENV_HIDE_EMOJIS: 'True'
.drone.yml (new file, 91 lines)
@@ -0,0 +1,91 @@
+---
+kind: pipeline
+type: docker
+name: python311
+
+steps:
+- name: test
+  image: python:3.11-slim
+  commands:
+  - id
+  - python -V
+  - apt update && apt install -y gcc g++ libicu-dev pkg-config git
+  - python -m pip install poetry
+  - poetry install
+  - poetry run pytest
+  # Basic test
+  - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv
+  # Basic test with unsafe fixes
+  - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u
+  # Geography test
+  - poetry run csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv
+  # Geography test with unsafe fixes
+  - poetry run csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv -u
+  # Test with experimental checks
+  - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e
+  # Test with AGROVOC validation
+  - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject
+  # Test with AGROVOC validation (and dropping invalid)
+  - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject -d
+
+---
+kind: pipeline
+type: docker
+name: python310
+
+steps:
+- name: test
+  image: python:3.10-slim
+  commands:
+  - id
+  - python -V
+  - apt update && apt install -y gcc g++ libicu-dev pkg-config git
+  - python -m pip install poetry
+  - poetry install
+  - poetry run pytest
+  # Basic test
+  - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv
+  # Basic test with unsafe fixes
+  - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u
+  # Geography test
+  - poetry run csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv
+  # Geography test with unsafe fixes
+  - poetry run csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv -u
+  # Test with experimental checks
+  - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e
+  # Test with AGROVOC validation
+  - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject
+  # Test with AGROVOC validation (and dropping invalid)
+  - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject -d
+
+---
+kind: pipeline
+type: docker
+name: python39
+
+steps:
+- name: test
+  image: python:3.9-slim
+  commands:
+  - id
+  - python -V
+  - apt update && apt install -y gcc g++ libicu-dev pkg-config git
+  - python -m pip install poetry
+  - poetry install
+  - poetry run pytest
+  # Basic test
+  - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv
+  # Basic test with unsafe fixes
+  - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u
+  # Geography test
+  - poetry run csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv
+  # Geography test with unsafe fixes
+  - poetry run csv-metadata-quality -i data/test-geography.csv -o /tmp/test.csv -u
+  # Test with experimental checks
+  - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e
+  # Test with AGROVOC validation
+  - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject
+  # Test with AGROVOC validation (and dropping invalid)
+  - poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject -d
+
+# vim: ts=2 sw=2 et
.github/workflows/python-app.yml (new file, vendored, 45 lines)
@@ -0,0 +1,45 @@
+# This workflow will install Python dependencies, run tests and lint with a single version of Python
+# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
+
+name: Build and Test
+
+on:
+  push:
+    branches: [ master ]
+  pull_request:
+    branches: [ master ]
+
+jobs:
+  build:
+
+    runs-on: ubuntu-22.04
+
+    steps:
+      - uses: actions/checkout@v4
+      - name: Install poetry
+        run: pipx install poetry
+      - uses: actions/setup-python@v5
+        with:
+          python-version: '3.11'
+          cache: 'poetry'
+      - run: poetry install
+      - name: Lint with flake8
+        run: |
+          # stop the build if there are Python syntax errors or undefined names
+          poetry run flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
+          # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
+          poetry run flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
+      - name: Test with pytest
+        run: poetry run pytest
+      - name: Test CLI
+        run: |
+          # Basic test
+          poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv
+          # Test with unsafe fixes
+          poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u
+          # Test with experimental checks
+          poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e
+          # Test with AGROVOC validation
+          poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject
+          # Test with AGROVOC validation (and dropping invalid)
+          poetry run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject -d
.travis.yml (deleted, 11 lines)
@@ -1,11 +0,0 @@
-dist: xenial
-language: python
-python:
-  - "3.6"
-  - "3.7"
-install:
-  - "pip install pipenv --upgrade-strategy=only-if-needed"
-  - "pipenv install --dev"
-script: pytest
-
-# vim: ts=2 sw=2 et
CHANGELOG.md (183 lines added)
@@ -4,6 +4,189 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## Unreleased
+### Added
+- Ability to normalize DOIs to https://doi.org URI format
+
+### Fixed
+- Fixed regex so we don't run the invalid multi-value separator fix on
+  `dcterms.bibliographicCitation` fields
+- Fixed regex so we run the comma space fix on `dcterms.bibliographicCitation`
+  fields
+- Don't crash the country/region checker/fixer when a title field is missing
+
+### Changed
+- Don't run newline fix on description fields
+- Install requests-cache in main run() function instead of check.agrovoc() function so we only incur the overhead once
+- Use py3langid instead of langid, see: [How to make language detection with langid.py faster](https://adrien.barbaresi.eu/blog/language-detection-langid-py-faster.html)
+
+### Updated
+- Python dependencies, including Pandas 2.0.0 and [Arrow-backed dtypes](https://datapythonista.me/blog/pandas-20-and-the-arrow-revolution-part-i)
+- SPDX license list
+
+## [0.6.1] - 2023-02-23
+### Fixed
+- Missing region check should ignore subregion field, if it exists
+
+### Changed
+- Use SPDX license data from SPDX themselves instead of spdx-license-list
+  because it is deprecated and outdated
+- Require Python 3.9+
+- Don't run `fix.separators()` on title or abstract fields
+- Don't run whitespace or newline fixes on abstract fields
+- Ignore some common non-SPDX licenses
+- Ignore `__description` suffix in filenames meant for SAFBuilder when checking
+  for uncommon file extensions
+
+### Updated
+- Python dependencies
+
+## [0.6.0] - 2022-09-02
+### Changed
+- Perform fix for "unnecessary" Unicode characters after we try to fix encoding
+  issues with ftfy
+- ftfy heuristics to use `is_bad()` instead of `sequence_weirdness()`
+- ftfy `fix_text()` to *not* change “smart quotes” to "ASCII quotes"
+
+### Updated
+- Python dependencies
+- Metadata field exclude logic
+
+### Added
+- Ability to drop invalid AGROVOC values with `-d` when checking AGROVOC values
+  with `-a <field.name>`
+- Ability to add missing UN M.49 regions when both country and region columns
+  are present. Enable with `-u` (unsafe fixes) for now.
+
+### Removed
+- Support for reading Excel files (both `.xls` and `.xlsx`) as it was completely
+  untested
+
+## [0.5.0] - 2021-12-08
+### Added
+- Ability to check for, and fix, "mojibake" characters using [ftfy](https://github.com/LuminosoInsight/python-ftfy)
+- Ability to check if the item's title exists in the citation
+- Ability to check if an item has countries, but no matching regions (only
+  suggests missing regions if there is a region field in the CSV)
+
+### Updated
+- Python dependencies
+
+### Fixed
+- Regular expression to match all citation fields (dc.identifier.citation as
+  well as dcterms.bibliographicCitation) in `experimental.correct_language()`
+- Regular expression to match dc.title and dcterms.title, but
+  ignore dc.title.alternative, in `check.duplicate_items()`
+- Missing field name in `fix.newlines()` output
+
+## [0.4.7] - 2021-03-17
+### Changed
+- Fixing invalid multi-value separators like `|` and `|||` is no longer class-
+  ified as "unsafe" as I have yet to see a case where this was intentional
+- Not user visible, but now checks only print a warning to the screen instead
+  of returning a value and re-writing the DataFrame, which should be faster and
+  use less memory
+
+### Added
+- Configurable directory for AGROVOC requests cache (to allow running the web
+  version from Google App Engine where we can only write to /tmp)
+- Ability to check for duplicate items in the data set (uses a combination of
+  the title, type, and date issued to determine uniqueness)
+
+### Removed
+- Checks for invalid and unnecessary multi-value separators because now I fix
+  them whenever I see them, so there is no need to have checks for them
+
+### Updated
+- Run `poetry update` to update project dependencies
+
+## [0.4.6] - 2021-03-11
+### Added
+- Validation of dcterms.license field against SPDX license identifiers
+
+### Changed
+- Use DCTERMS fields where possible in `data/test.csv`
+
+### Updated
+- Run `poetry update` to update project dependencies
+
+### Fixed
+- Output for all fixes should be green, because it is good
+
+## [0.4.5] - 2021-03-04
+### Added
+- Check dates in dcterms.issued field as well, not just fields that have the
+  word "date" in them
+
+### Updated
+- Run `poetry update` to update project dependencies
+
+## [0.4.4] - 2021-02-21
+### Added
+- Accept dates formatted in ISO 8601 extended with combined date and time, for
+  example: 2020-08-31T11:04:56Z
+- Colorized output: red for errors, yellow for warnings and information, green
+  for changes
+
+### Updated
+- Run `poetry update` to update project dependencies
+
+## [0.4.3] - 2021-01-26
+### Changed
+- Reformat with black
+- Requires Python 3.7+ for pandas 1.2.0
+
+### Updated
+- Run `poetry update`
+- Expand check/fix for multi-value separators to include metadata with invalid
+  separators at the end, for example "Kenya||Tanzania||"
+
+## [0.4.2] - 2020-07-06
+### Changed
+- Add field name to the output for more fixes and checks to help identify where
+  the error is
+- Minor optimizations to AGROVOC subject lookup
+- Use Poetry instead of Pipenv
+
+### Updated
+- Update python dependencies to latest versions
+
+## [0.4.1] - 2020-01-15
+### Changed
+- Reduce minimum Python version to 3.6 by working around the `is_normalized()`
+  that only works in Python >= 3.8
+
+## [0.4.0] - 2020-01-15
+### Added
+- Unicode normalization (enable with `--unsafe-fixes`, see README.md)
+
+### Updated
+- Update python dependencies to latest versions, including numpy 1.18.1, pandas
+  1.0.0rc0, flake8 3.7.9, pytest 5.3.2, and black 19.10b0
+- Regenerate requirements.txt and requirements-dev.txt
+
+### Changed
+- Use Python 3.8.0 for pipenv
+- Use Ubuntu 18.04 "Bionic" for TravisCI builds
+- Test Python 3.8 in TravisCI builds
+
+## [0.3.1] - 2019-10-01
+### Changed
+- Replace non-breaking spaces (U+00A0) with space instead of removing them
+- Harmonize language of script output when fixing various issues
+
+## [0.3.0] - 2019-09-26
+### Updated
+- Update python dependencies to latest versions, including numpy 1.17.2, pandas
+  0.25.1, pytest 5.1.3, and requests-cache 0.5.2
+
+### Added
+- csvkit to dev requirements (csvcut etc are useful during development)
+- Experimental language validation using the Python `langid` library (enable with `-e`, see README.md)
+
+### Changed
+- Re-formatted code with black and isort
+
 ## [0.2.2] - 2019-08-27
 ### Changed
 - Output of date checks to include column names (helps debugging in case there are multiple date fields)
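The DOI normalization listed under Unreleased converts the many forms DOIs take in metadata (bare identifiers, "doi:" prefixes, old dx.doi.org links) to the https://doi.org URI format. A minimal sketch of the idea in Python, as an illustration only, not the project's actual `fix.normalize_dois()` implementation:

```python
import re


def normalize_doi(value: str) -> str:
    """Sketch: rewrite common DOI forms to the https://doi.org URI format."""
    # A DOI looks like "10.<registrant>/<suffix>"; find it wherever it sits in
    # the string, whether bare, prefixed with "doi:", or inside an old URL.
    match = re.search(r"(10\.\d{4,9}/\S+)", value)
    if match:
        return f"https://doi.org/{match.group(1)}"
    return value


assert normalize_doi("doi: 10.1016/j.worlddev.2010.06.006") == "https://doi.org/10.1016/j.worlddev.2010.06.006"
assert normalize_doi("http://dx.doi.org/10.1016/j.worlddev.2010.06.006") == "https://doi.org/10.1016/j.worlddev.2010.06.006"
```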
CITATION.cff (new file, 19 lines)
@@ -0,0 +1,19 @@
+cff-version: "1.1.0"
+abstract: "A simple but opinionated metadata quality checker and fixer designed to work with CSVs in the DSpace ecosystem."
+authors:
+  -
+    affiliation: "International Livestock Research Institute"
+    family-names: Orth
+    given-names: "Alan S."
+    orcid: "https://orcid.org/0000-0002-1735-7458"
+date-released: 2019-07-26
+doi: "10568/110997"
+keywords:
+  - dspace
+  - "dublin-core"
+  - csv
+  - metadata
+license: "GPL-3.0-only"
+message: "If you use this software, please cite it using these metadata."
+repository-code: "https://github.com/ilri/csv-metadata-quality"
+title: "DSpace CSV Metadata Quality Checker"
MANIFEST.in (new file, 1 line)
@@ -0,0 +1 @@
+include csv_metadata_quality/data/licenses.json
Pipfile (deleted, 25 lines)
@@ -1,25 +0,0 @@
-[[source]]
-name = "pypi"
-url = "https://pypi.org/simple"
-verify_ssl = true
-
-[dev-packages]
-pytest = "*"
-ipython = "*"
-flake8 = "*"
-pytest-clarity = "*"
-
-[packages]
-pandas = "*"
-python-stdnum = "*"
-xlrd = "*"
-requests = "*"
-requests-cache = "*"
-pycountry = "*"
-csv-metadata-quality = {editable = true,path = "."}
-
-[requires]
-python_version = "3.7"
-
-[pipenv]
-allow_prereleases = true
Pipfile.lock (generated, deleted, 376 lines)
@@ -1,376 +0,0 @@
The generated lock file was removed along with the Pipfile. It required Python 3.7 and pinned the following packages (each entry with sha256 hashes, sourced from https://pypi.org/simple):

default: certifi 2019.6.16, chardet 3.0.4, csv-metadata-quality (editable, path "."), idna 2.8, numpy 1.17.0, pandas 0.25.0, pycountry 19.7.15, python-dateutil 2.8.0, python-stdnum 1.11, pytz 2019.2, requests 2.22.0, requests-cache 0.5.0, six 1.12.0, urllib3 1.25.3, xlrd 1.2.0

develop: atomicwrites 1.3.0, attrs 19.1.0, backcall 0.1.0, decorator 4.4.0, entrypoints 0.3, flake8 3.7.8, importlib-metadata 0.19, ipython 7.7.0, ipython-genutils 0.2.0, jedi 0.14.1, mccabe 0.6.1, more-itertools 7.2.0, packaging 19.1, parso 0.5.1, pexpect 4.7.0, pickleshare 0.7.5, pluggy 0.12.0, prompt-toolkit 2.0.9, ptyprocess 0.6.0, py 1.8.0, pycodestyle 2.5.0, pyflakes 2.1.1, pygments 2.4.2, pyparsing 2.4.2, pytest 5.0.1, pytest-clarity 0.2.0a1, six 1.12.0, termcolor 1.1.0, traitlets 4.3.2, wcwidth 0.1.7, zipp 0.5.2
README.md (90 lines changed)
@@ -1,31 +1,49 @@
-# CSV Metadata Quality [](https://travis-ci.org/ilri/csv-metadata-quality) [](https://builds.sr.ht/~alanorth/csv-metadata-quality?)
-A simple, but opinionated metadata quality checker and fixer designed to work with CSVs in the DSpace ecosystem. The implementation is essentially a pipeline of checks and fixes that begins with splitting multi-value fields on the standard DSpace "||" separator, trimming leading/trailing whitespace, and then proceeding to more specialized cases like ISSNs, ISBNs, languages, etc.
-
-Requires Python 3.6 or greater. CSV and Excel support comes from the [Pandas](https://pandas.pydata.org/) library, though your mileage may vary with Excel because this is much less tested.
+<h1 align="center">DSpace CSV Metadata Quality Checker</h1>
+
+<p align="center">
+<a href="https://ci.mjanja.ch/alanorth/csv-metadata-quality"><img alt="Build Status" src="https://ci.mjanja.ch/api/badges/alanorth/csv-metadata-quality/status.svg"></a>
+<a href="https://github.com/ilri/csv-metadata-quality/actions"><img alt="Build and Test" src="https://github.com/ilri/csv-metadata-quality/workflows/Build%20and%20Test/badge.svg"></a>
+<a href="https://github.com/psf/black"><img alt="Code style: black" src="https://img.shields.io/badge/code%20style-black-000000.svg"></a>
+</p>
+
+A simple, but opinionated metadata quality checker and fixer designed to work with CSVs in the DSpace ecosystem (though it could theoretically work on any CSV that uses Dublin Core fields as columns). The implementation is essentially a pipeline of checks and fixes that begins with splitting multi-value fields on the standard DSpace "||" separator, trimming leading/trailing whitespace, and then proceeding to more specialized cases like ISSNs, ISBNs, languages, unnecessary Unicode, AGROVOC terms, etc.
+
+Requires Python 3.9 or greater. CSV support comes from the [Pandas](https://pandas.pydata.org/) library.
+
+If you use the DSpace CSV metadata quality checker please cite:
+
+*Orth, A. 2019. DSpace CSV metadata quality checker. Nairobi, Kenya: ILRI. https://hdl.handle.net/10568/110997.*
 
 ## Functionality
 
 - Validate dates, ISSNs, ISBNs, and multi-value separators ("||")
-- Validate languages against ISO 639-2 and ISO 639-3
+- Validate languages against ISO 639-1 (alpha2) and ISO 639-3 (alpha3)
+- Experimental validation of titles and abstracts against item's Dublin Core language field
 - Validate subjects against the AGROVOC REST API (see the `--agrovoc-fields` option)
+- Validation of licenses against the list of [SPDX license identifiers](https://spdx.org/licenses)
 - Fix leading, trailing, and excessive (ie, more than one) whitespace
-- Fix invalid multi-value separators (`|`) using `--unsafe-fixes`
+- Fix invalid and unnecessary multi-value separators (`|`)
 - Fix problematic newlines (line feeds) using `--unsafe-fixes`
+- Perform [Unicode normalization](https://withblue.ink/2019/03/11/why-you-need-to-normalize-unicode-strings.html) on strings using `--unsafe-fixes`
 - Remove unnecessary Unicode like [non-breaking spaces](https://en.wikipedia.org/wiki/Non-breaking_space), [replacement characters](https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character), etc
 - Check for "suspicious" characters that indicate encoding or copy/paste issues, for example "foreˆt" should be "forêt"
+- Check for "mojibake" characters (and attempt to fix with `--unsafe-fixes`)
+- Check for countries with missing regions (and attempt to fix with `--unsafe-fixes`)
 - Remove duplicate metadata values
+- Check for duplicate items, using the title, type, and date issued as an indicator
+- [Normalize DOIs](https://www.crossref.org/documentation/member-setup/constructing-your-dois/) to https://doi.org URI format
 
 ## Installation
-The easiest way to install CSV Metadata Quality is with [pipenv](https://github.com/pypa/pipenv):
+The easiest way to install CSV Metadata Quality is with [poetry](https://python-poetry.org):
 
 ```
 $ git clone https://github.com/ilri/csv-metadata-quality.git
 $ cd csv-metadata-quality
-$ pipenv install
-$ pipenv shell
+$ poetry install
+$ poetry shell
 ```
 
-Otherwise, if you don't have pipenv, you can use a vanilla Python virtual environment:
+Otherwise, if you don't have poetry, you can use a vanilla Python virtual environment:
 
 ```
 $ git clone https://github.com/ilri/csv-metadata-quality.git
@@ -48,15 +66,36 @@ To validate and clean a CSV file you must specify input and output files using t
 $ csv-metadata-quality -i data/test.csv -o /tmp/test.csv
 ```
 
-## Unsafe Fixes
-You can enable several "unsafe" fixes with the `--unsafe-fixes` option. Currently this will attempt to fix invalid multi-value separators and remove newlines.
-
-### Invalid Multi-Value Separators
-This is considered "unsafe" because it is *theoretically* possible for a single `|` character to be used legitimately in a metadata value, though in my experience it is always a typo. For example, if a user mistakenly writes `Kenya|Tanzania` when attempting to indicate two countries, the result will be one metadata value with the literal text `Kenya|Tanzania`. The `--unsafe-fixes` option will correct the invalid multi-value separator so that there are two metadata values, ie `Kenya||Tanzania`.
+## Invalid Multi-Value Separators
+While it is *theoretically* possible for a single `|` character to be used legitimately in a metadata value, in my experience it is always a typo. For example, if a user mistakenly writes `Kenya|Tanzania` when attempting to indicate two countries, the result will be one metadata value with the literal text `Kenya|Tanzania`. This utility will correct the invalid multi-value separator so that there are two metadata values, ie `Kenya||Tanzania`.
+
+This will also remove unnecessary trailing multi-value separators, for example `Kenya||Tanzania||`.
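As a rough sketch of the separator fix just described (an illustration only; the project's real logic lives in `fix.separators()` and also reports the field name, as the app.py diff further down shows):

```python
import re


def fix_separators(value: str) -> str:
    """Sketch: normalize DSpace multi-value separators in one field."""
    # Split on runs of one or more pipes so that "Kenya|Tanzania||" yields
    # clean values, then drop the empty strings left by trailing separators.
    values = [v for v in re.split(r"\|+", value) if v.strip()]
    return "||".join(values)


assert fix_separators("Kenya|Tanzania") == "Kenya||Tanzania"
assert fix_separators("Kenya||Tanzania||") == "Kenya||Tanzania"
```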
+## Unsafe Fixes
+You can enable several "unsafe" fixes with the `--unsafe-fixes` option. Currently this will remove newlines, perform Unicode normalization, attempt to fix "mojibake" characters, and add missing UN M.49 regions.
+
+### Newlines
+This is considered "unsafe" because some systems give special importance to vertical space and render it properly. DSpace does not support rendering newlines in its XMLUI and has, at times, suffered from parsing errors that cause the import process to fail if an input file had newlines. The `--unsafe-fixes` option strips Unix line feeds (U+000A).
+
+### Unicode Normalization
+[Unicode](https://en.wikipedia.org/wiki/Unicode) is a standard for encoding text. As the standard aims to support most of the world's languages, characters can often be represented in different ways and still be valid Unicode. This leads to interesting problems that can be confusing unless you know what's going on behind the scenes. For example, the characters `é` and `é` *look* the same, but are not — technically they refer to different code points in the Unicode standard:
+
+- `é` is the Unicode code point `U+00E9`
+- `é` is the Unicode code points `U+0065` + `U+0301`
+
+Read more about [Unicode normalization](https://withblue.ink/2019/03/11/why-you-need-to-normalize-unicode-strings.html).
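The code point difference described above is easy to demonstrate with Python's standard `unicodedata` module (the `is_normalized()` helper that the 0.4.1 changelog entry mentions requires Python 3.8+):

```python
import unicodedata

precomposed = "\u00e9"  # é as a single code point
decomposed = "e\u0301"  # e followed by a combining acute accent

print(precomposed == decomposed)  # False: same appearance, different code points

# NFC normalization composes the two-code-point form into the canonical one
print(unicodedata.normalize("NFC", decomposed) == precomposed)  # True
print(unicodedata.is_normalized("NFC", decomposed))  # False (Python 3.8+)
```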
+### Encoding Issues aka "Mojibake"
+[Mojibake](https://en.wikipedia.org/wiki/Mojibake) is a phenomenon that occurs when text is decoded using an unintended character encoding. This usually presents itself in the form of strange, garbled characters in the text. Enabling "unsafe" fixes will attempt to correct these, for example:
+
+- CIAT PublicaÃ§ao → CIAT Publicaçao
+- CIAT PublicaciÃ³n → CIAT Publicación
+
+Pay special attention to the output of the script as well as the resulting file to make sure no new issues have been introduced. The ideal way to solve these issues is to avoid it in the first place. See [this guide about opening CSVs in UTF-8 format in Excel](https://www.itg.ias.edu/content/how-import-csv-file-uses-utf-8-character-encoding-0).
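The mojibake check and fix are built on [ftfy](https://github.com/LuminosoInsight/python-ftfy); a minimal sketch using its documented `fix_text()` and `is_bad()` entry points (the same heuristics the changelog above refers to):

```python
import ftfy
from ftfy.badness import is_bad

broken = "CIAT PublicaciÃ³n"  # UTF-8 text that was mistakenly decoded as Latin-1

print(is_bad(broken))         # True: the badness heuristics flag likely mojibake
print(ftfy.fix_text(broken))  # "CIAT Publicación"
```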
+### Countries With Missing Regions
+When an input file has both country and region columns we can check to see if the ISO 3166 country names have matching UN M.49 regions and add them when they are missing.
+
 ## AGROVOC Validation
 You can enable validation of metadata values in certain fields against the AGROVOC REST API with the `--agrovoc-fields` option. For example, in addition to agricultural subjects, many countries and regions are also present in AGROVOC. Enable this validation by specifying a comma-separated list of fields:
@@ -69,18 +108,35 @@ Invalid AGROVOC (cg.coverage.country): KENYAA
 
 *Note: Requests to the AGROVOC REST API are cached using [requests_cache](https://pypi.org/project/requests-cache/) to speed up subsequent runs with the same data and to be kind to the system's administrators.*
 
+## Experimental Checks
+You can enable experimental support for validating whether the value of an item's `dc.language.iso` or `dcterms.language` field matches the actual language used in its title, abstract, and citation.
+
+```
+$ csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e
+...
+Possibly incorrect language es (detected en): Incorrect ISO 639-1 language
+Possibly incorrect language spa (detected eng): Incorrect ISO 639-3 language
+```
+
+This currently uses the [Python langid](https://github.com/saffsd/langid.py) library. In the future I would like to move to the fastText library, but there is currently an [issue with their Python bindings](https://github.com/facebookresearch/fastText/issues/909) that makes this unfeasible.
+
 ## Todo
 
 - Reporting / summary
 - Better logging, for example with INFO, WARN, and ERR levels
 - Verbose, debug, or quiet options
 - Warn if an author is shorter than 3 characters?
-- Validate dc.rights field against SPDX? Perhaps with an option like `-m spdx` to enable the spdx module?
-- Validate DOIs? Normalize to https://doi.org format? Or use just the DOI part: 10.1016/j.worlddev.2010.06.006
 - Warn if two items use the same file in `filename` column
-- Add an option to drop invalid AGROVOC subjects?
 - Add check for author names with incorrect spacing after commas, ie "Orth,Alan S."
 - Add tests for application invocation, ie `tests/test_app.py`?
 - Validate ISSNs or journal titles against CrossRef API?
 - Add configurable field validation, like specify a field name and a validation file?
   - Perhaps like --validate=field.name,filename
 - Add some row-based item sanity checks and fixes:
   - Warn if item is Open Access, but missing a filename or URL
   - Warn if item is Open Access, but missing a license
   - Warn if item has an ISSN but no journal title
   - Update journal titles from ISSN
+- Migrate from Pandas to Polars
 
 ## License
 This work is licensed under the [GPLv3](https://www.gnu.org/licenses/gpl-3.0.en.html).
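The changelog above notes the switch from langid to py3langid for this experimental check. A minimal sketch of the detection step, assuming py3langid's documented `classify()` API, which returns a (language, score) tuple; the declared field value below is hypothetical:

```python
import py3langid as langid

declared = "en"  # hypothetical value from an item's dcterms.language field
title = "Estrategia de mejoramiento de la calidad de los datos"

detected, score = langid.classify(title)
if detected != declared:
    print(f"Possibly incorrect language {declared} (detected {detected})")
```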
csv_metadata_quality/__main__.py
@@ -1,10 +1,13 @@
-from csv_metadata_quality import app
+# SPDX-License-Identifier: GPL-3.0-only
+
 from sys import argv
 
+from csv_metadata_quality import app
+
 
 def main():
     app.run(argv)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
@ -1,21 +1,66 @@
|
||||
from csv_metadata_quality.version import VERSION
|
||||
# SPDX-License-Identifier: GPL-3.0-only
|
||||
|
||||
import argparse
|
||||
import csv_metadata_quality.check as check
|
||||
import csv_metadata_quality.fix as fix
|
||||
import pandas as pd
|
||||
import os
|
||||
import re
|
||||
import signal
|
||||
import sys
|
||||
from datetime import timedelta
|
||||
|
||||
import pandas as pd
|
||||
import requests_cache
|
||||
from colorama import Fore
|
||||
|
||||
import csv_metadata_quality.check as check
|
||||
import csv_metadata_quality.experimental as experimental
|
||||
import csv_metadata_quality.fix as fix
|
||||
from csv_metadata_quality.version import VERSION
|
||||
|
||||
|
||||
def parse_args(argv):
|
||||
parser = argparse.ArgumentParser(description='Metadata quality checker and fixer.')
|
||||
parser.add_argument('--agrovoc-fields', '-a', help='Comma-separated list of fields to validate against AGROVOC, for example: dc.subject,cg.coverage.country')
|
||||
parser.add_argument('--input-file', '-i', help='Path to input file. Can be UTF-8 CSV or Excel XLSX.', required=True, type=argparse.FileType('r', encoding='UTF-8'))
|
||||
parser.add_argument('--output-file', '-o', help='Path to output file (always CSV).', required=True, type=argparse.FileType('w', encoding='UTF-8'))
|
||||
parser.add_argument('--unsafe-fixes', '-u', help='Perform unsafe fixes.', action='store_true')
|
||||
parser.add_argument('--version', '-V', action='version', version=f'CSV Metadata Quality v{VERSION}')
|
||||
parser.add_argument('--exclude-fields', '-x', help='Comma-separated list of fields to skip, for example: dc.contributor.author,dc.identifier.citation')
|
||||
parser = argparse.ArgumentParser(description="Metadata quality checker and fixer.")
|
||||
parser.add_argument(
|
||||
"--agrovoc-fields",
|
||||
"-a",
|
||||
help="Comma-separated list of fields to validate against AGROVOC, for example: dcterms.subject,cg.coverage.country",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--drop-invalid-agrovoc",
|
||||
"-d",
|
||||
help="After validating metadata values against AGROVOC, drop invalid values.",
|
||||
action="store_true",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--experimental-checks",
|
||||
"-e",
|
||||
help="Enable experimental checks like language detection",
|
||||
action="store_true",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--input-file",
|
||||
"-i",
|
||||
help="Path to input file. Must be a UTF-8 CSV.",
|
||||
required=True,
|
||||
type=argparse.FileType("r", encoding="UTF-8"),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--output-file",
|
||||
"-o",
|
||||
help="Path to output file (always CSV).",
|
||||
required=True,
|
||||
type=argparse.FileType("w", encoding="UTF-8"),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--unsafe-fixes", "-u", help="Perform unsafe fixes.", action="store_true"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--version", "-V", action="version", version=f"CSV Metadata Quality v{VERSION}"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--exclude-fields",
|
||||
"-x",
|
||||
help="Comma-separated list of fields to skip, for example: dc.contributor.author,dcterms.bibliographicCitation",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
return args
|
||||
@ -32,87 +77,182 @@ def run(argv):
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
|
||||
# Read all fields as strings so dates don't get converted from 1998 to 1998.0
|
||||
df = pd.read_csv(args.input_file, dtype=str)
|
||||
df = pd.read_csv(args.input_file, dtype_backend="pyarrow", dtype="str")
|
||||
|
||||
for column in df.columns.values.tolist():
|
||||
# Check if the user requested to skip any fields
|
||||
if args.exclude_fields:
|
||||
skip = False
|
||||
# Split the list of excludes on ',' so we can test exact matches
|
||||
# rather than fuzzy matches with regexes or "if word in string"
|
||||
for exclude in args.exclude_fields.split(','):
|
||||
if column == exclude and skip is False:
|
||||
skip = True
|
||||
if skip:
|
||||
print(f'Skipping {column}')
|
||||
# Check if the user requested to skip any fields
|
||||
if args.exclude_fields:
|
||||
# Split the list of excluded fields on ',' into a list. Note that the
|
||||
# user should be careful to no include spaces here.
|
||||
exclude = args.exclude_fields.split(",")
|
||||
else:
|
||||
exclude = []
|
||||
|
||||
continue
|
||||
# enable transparent request cache with thirty days expiry
|
||||
expire_after = timedelta(days=30)
|
||||
# Allow overriding the location of the requests cache, just in case we are
|
||||
# running in an environment where we can't write to the current working di-
|
||||
# rectory (for example from csv-metadata-quality-web).
|
||||
REQUESTS_CACHE_DIR = os.environ.get("REQUESTS_CACHE_DIR", ".")
|
||||
requests_cache.install_cache(
|
||||
f"{REQUESTS_CACHE_DIR}/agrovoc-response-cache", expire_after=expire_after
|
||||
)
|
||||
|
||||
# Fix: whitespace
|
||||
df[column] = df[column].apply(fix.whitespace)
|
||||
# prune old cache entries
|
||||
requests_cache.delete()

    for column in df.columns:
        if column in exclude:
            print(f"{Fore.YELLOW}Skipping {Fore.RESET}{column}")

            continue

        # Fix: newlines
        if args.unsafe_fixes:
            df[column] = df[column].apply(fix.newlines)
        # Skip whitespace and newline fixes on abstracts and descriptions
        # because there are too many with legitimate multi-line metadata.
        match = re.match(r"^.*?(abstract|description).*$", column)
        if match is None:
            # Fix: whitespace
            df[column] = df[column].apply(fix.whitespace, field_name=column)

            # Fix: newlines
            df[column] = df[column].apply(fix.newlines, field_name=column)

        # Fix: missing space after comma. Only run on author and citation
        # fields for now, as this problem is mostly an issue in names.
        if args.unsafe_fixes:
            match = re.match(r'^.*?(author|citation).*$', column)
            match = re.match(r"^.*?(author|[Cc]itation).*$", column)
            if match is not None:
                df[column] = df[column].apply(fix.comma_space, field_name=column)

        # Fix: perform Unicode normalization (NFC) to convert decomposed
        # characters into their canonical forms.
        if args.unsafe_fixes:
            df[column] = df[column].apply(fix.normalize_unicode, field_name=column)

        # Check: suspicious characters
        df[column].apply(check.suspicious_characters, field_name=column)

        # Fix: mojibake. If unsafe fixes are not enabled then we only check.
        if args.unsafe_fixes:
            df[column] = df[column].apply(fix.mojibake, field_name=column)
        else:
            df[column].apply(check.mojibake, field_name=column)

        # Fix: unnecessary Unicode
        df[column] = df[column].apply(fix.unnecessary_unicode)

        # Check: invalid multi-value separator
        df[column] = df[column].apply(check.separators)
        # Fix: normalize DOIs
        match = re.match(r"^.*?identifier\.doi.*$", column)
        if match is not None:
            df[column] = df[column].apply(fix.normalize_dois)

        # Check: suspicious characters
        df[column] = df[column].apply(check.suspicious_characters, field_name=column)

        # Fix: invalid multi-value separator
        if args.unsafe_fixes:
            df[column] = df[column].apply(fix.separators)
        # Fix: invalid and unnecessary multi-value separators. Skip the title
        # and abstract fields because "|" is used to indicate something like
        # a subtitle.
        match = re.match(r"^.*?(abstract|[Cc]itation|title).*$", column)
        if match is None:
            df[column] = df[column].apply(fix.separators, field_name=column)
            # Run whitespace fix again after fixing invalid separators
            df[column] = df[column].apply(fix.whitespace)
            df[column] = df[column].apply(fix.whitespace, field_name=column)

        # Fix: duplicate metadata values
        df[column] = df[column].apply(fix.duplicates)
        df[column] = df[column].apply(fix.duplicates, field_name=column)

        # Check: invalid AGROVOC subject
        # Check: invalid AGROVOC subjects and optionally drop them
        if args.agrovoc_fields:
            # Identify fields the user wants to validate against AGROVOC
            for field in args.agrovoc_fields.split(','):
            for field in args.agrovoc_fields.split(","):
                if column == field:
                    df[column] = df[column].apply(check.agrovoc, field_name=column)
                    df[column] = df[column].apply(
                        check.agrovoc, field_name=column, drop=args.drop_invalid_agrovoc
                    )

        # Check: invalid language
        match = re.match(r'^.*?language.*$', column)
        match = re.match(r"^.*?language.*$", column)
        if match is not None:
            df[column] = df[column].apply(check.language)
            df[column].apply(check.language)

        # Check: invalid ISSN
        match = re.match(r'^.*?issn.*$', column)
        match = re.match(r"^.*?issn.*$", column)
        if match is not None:
            df[column] = df[column].apply(check.issn)
            df[column].apply(check.issn)

        # Check: invalid ISBN
        match = re.match(r'^.*?isbn.*$', column)
        match = re.match(r"^.*?isbn.*$", column)
        if match is not None:
            df[column] = df[column].apply(check.isbn)
            df[column].apply(check.isbn)

        # Check: invalid date
        match = re.match(r'^.*?date.*$', column)
        match = re.match(r"^.*?(date|dcterms\.issued).*$", column)
        if match is not None:
            df[column] = df[column].apply(check.date, field_name=column)
            df[column].apply(check.date, field_name=column)

        # Check: filename extension
        if column == 'filename':
            df[column] = df[column].apply(check.filename_extension)
        if column == "filename":
            df[column].apply(check.filename_extension)

        # Check: SPDX license identifier
        match = re.match(r"dcterms\.license.*$", column)
        if match is not None:
            df[column].apply(check.spdx_license_identifier)

    ### End individual column checks ###

    # Check: duplicate items
    # We extract just the title, type, and date issued columns to analyze
    try:
        duplicates_df = df.filter(
            regex=r"dcterms\.title|dc\.title|dcterms\.type|dc\.type|dcterms\.issued|dc\.date\.issued"
        )
        check.duplicate_items(duplicates_df)

        # Delete the temporary duplicates DataFrame
        del duplicates_df
    except IndexError:
        pass

    ##
    # Perform some checks on rows so we can consider items as a whole rather
    # than simply on a field-by-field basis. This allows us to check whether
    # the language used in the title and abstract matches the language indi-
    # cated in the language field, for example.
    #
    # This is slower and apparently frowned upon in the Pandas community be-
    # cause it requires iterating over rows rather than using apply over a
    # column. For now it will have to do.
    ##

    # Transpose the DataFrame so we can consider each row as a column
    df_transposed = df.T

    # Remember, here a "column" is an item (previously row). Perhaps I
    # should rename column in this for loop...
    for column in df_transposed.columns:
        # Check: citation DOI
        check.citation_doi(df_transposed[column], exclude)

        # Check: title in citation
        check.title_in_citation(df_transposed[column], exclude)

        if args.unsafe_fixes:
            # Fix: countries match regions
            df_transposed[column] = fix.countries_match_regions(
                df_transposed[column], exclude
            )
        else:
            # Check: countries match regions
            check.countries_match_regions(df_transposed[column], exclude)

        if args.experimental_checks:
            experimental.correct_language(df_transposed[column], exclude)

    # Transpose the DataFrame back before writing. This is probably wasteful to
    # do every time since we technically only need to do it if we've done the
    # countries/regions fix above, but I can't think of another way for now.
    df_transposed_back = df_transposed.T

    # Write
    df.to_csv(args.output_file, index=False)
    df_transposed_back.to_csv(args.output_file, index=False)

    # Close the input and output files before exiting
    args.input_file.close()

@@ -1,4 +1,18 @@
# SPDX-License-Identifier: GPL-3.0-only

import logging
import re
from datetime import datetime

import country_converter as coco
import pandas as pd
import requests
from colorama import Fore
from pycountry import languages
from stdnum import isbn as stdnum_isbn
from stdnum import issn as stdnum_issn

from csv_metadata_quality.util import is_mojibake, load_spdx_licenses


def issn(field):
@@ -11,19 +25,16 @@ def issn(field):
    See: https://arthurdejong.org/python-stdnum/doc/1.11/index.html#stdnum.module.is_valid
    """

    from stdnum import issn

    # Skip fields with missing values
    if pd.isna(field):
        return

    # Try to split multi-value field on "||" separator
    for value in field.split('||'):
    for value in field.split("||"):
        if not stdnum_issn.is_valid(value):
            print(f"{Fore.RED}Invalid ISSN: {Fore.RESET}{value}")

        if not issn.is_valid(value):
            print(f'Invalid ISSN: {value}')

    return field
    return


def isbn(field):
@@ -36,43 +47,16 @@ def isbn(field):
    See: https://arthurdejong.org/python-stdnum/doc/1.11/index.html#stdnum.module.is_valid
    """

    from stdnum import isbn

    # Skip fields with missing values
    if pd.isna(field):
        return

    # Try to split multi-value field on "||" separator
    for value in field.split('||'):
    for value in field.split("||"):
        if not stdnum_isbn.is_valid(value):
            print(f"{Fore.RED}Invalid ISBN: {Fore.RESET}{value}")

        if not isbn.is_valid(value):
            print(f'Invalid ISBN: {value}')

    return field


def separators(field):
    """Check for invalid multi-value separators (ie "|" or "|||").

    Prints the field with the invalid multi-value separator.
    """

    import re

    # Skip fields with missing values
    if pd.isna(field):
        return

    # Try to split multi-value field on "||" separator
    for value in field.split('||'):

        # After splitting, see if there are any remaining "|" characters
        match = re.findall(r'^.*?\|.*$', value)

        if match:
            print(f'Invalid multi-value separator: {field}')

    return field
    return


def date(field, field_name):
@@ -85,47 +69,56 @@ def date(field, field_name):

    Prints the date if invalid.
    """
    from datetime import datetime

    if pd.isna(field):
        print(f'Missing date ({field_name}).')
        print(f"{Fore.RED}Missing date ({field_name}).{Fore.RESET}")

        return

    # Try to split multi-value field on "||" separator
    multiple_dates = field.split('||')
    multiple_dates = field.split("||")

    # We don't allow multi-value date fields
    if len(multiple_dates) > 1:
        print(f'Multiple dates not allowed ({field_name}): {field}')
        print(
            f"{Fore.RED}Multiple dates not allowed ({field_name}): {Fore.RESET}{field}"
        )

        return field
        return

    try:
        # Check if date is valid YYYY format
        datetime.strptime(field, '%Y')
        datetime.strptime(field, "%Y")

        return field
        return
    except ValueError:
        pass

    try:
        # Check if date is valid YYYY-MM format
        datetime.strptime(field, '%Y-%m')
        datetime.strptime(field, "%Y-%m")

        return field
        return
    except ValueError:
        pass

    try:
        # Check if date is valid YYYY-MM-DD format
        datetime.strptime(field, '%Y-%m-%d')
        datetime.strptime(field, "%Y-%m-%d")

        return field
        return
    except ValueError:
        print(f'Invalid date ({field_name}): {field}')
        pass

    return field
    try:
        # Check if date is valid YYYY-MM-DDTHH:MM:SSZ format
        datetime.strptime(field, "%Y-%m-%dT%H:%M:%SZ")

        return
    except ValueError:
        print(f"{Fore.RED}Invalid date ({field_name}): {Fore.RESET}{field}")

        return
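
The cascade of try/except blocks above accepts exactly four date formats. An equivalent loop-based sketch (not the code in this diff) shows the idea:

from datetime import datetime

def is_valid_date(field):
    # strptime() raises ValueError on a non-matching format, so trying the
    # four accepted patterns in order and returning on the first success
    # accepts exactly what the function above accepts
    for fmt in ("%Y", "%Y-%m", "%Y-%m-%d", "%Y-%m-%dT%H:%M:%SZ"):
        try:
            datetime.strptime(field, fmt)
            return True
        except ValueError:
            continue
    return False

assert is_valid_date("2019-07-29")
assert not is_valid_date("2019-07-260")  # invalid day, from data/test.csv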


def suspicious_characters(field, field_name):
@@ -140,7 +133,7 @@ def suspicious_characters(field, field_name):
        return

    # List of suspicious characters, for example: ́ˆ~`
    suspicious_characters = ['\u00B4', '\u02C6', '\u007E', '\u0060']
    suspicious_characters = ["\u00b4", "\u02c6", "\u007e", "\u0060"]

    for character in suspicious_characters:
        # Find the position of the suspicious character in the string
@@ -156,20 +149,18 @@ def suspicious_characters(field, field_name):
            # character and spanning enough of the rest to give a preview,
            # but not too much to cause the line to break in terminals with
            # a default of 80 characters width.
            suspicious_character_msg = f'Suspicious character ({field_name}): {field_subset}'
            print(f'{suspicious_character_msg:1.80}')
            suspicious_character_msg = f"{Fore.YELLOW}Suspicious character ({field_name}): {Fore.RESET}{field_subset}"
            print(f"{suspicious_character_msg:1.80}")

    return field
    return


def language(field):
    """Check if a language is valid ISO 639-2 or ISO 639-3.
    """Check if a language is valid ISO 639-1 (alpha 2) or ISO 639-3 (alpha 3).

    Prints the value if it is invalid.
    """

    from pycountry import languages

    # Skip fields with missing values
    if pd.isna(field):
        return
@@ -177,27 +168,22 @@ def language(field):
    # need to handle "Other" values here...

    # Try to split multi-value field on "||" separator
    for value in field.split('||'):

    for value in field.split("||"):
        # After splitting, check if language value is 2 or 3 characters so we
        # can check it against ISO 639-2 or ISO 639-3 accordingly.
        # can check it against ISO 639-1 or ISO 639-3 accordingly.
        if len(value) == 2:
            if not languages.get(alpha_2=value):
                print(f'Invalid ISO 639-2 language: {value}')

                pass
                print(f"{Fore.RED}Invalid ISO 639-1 language: {Fore.RESET}{value}")
        elif len(value) == 3:
            if not languages.get(alpha_3=value):
                print(f'Invalid ISO 639-3 language: {value}')

                pass
                print(f"{Fore.RED}Invalid ISO 639-3 language: {Fore.RESET}{value}")
        else:
            print(f'Invalid language: {value}')
            print(f"{Fore.RED}Invalid language: {Fore.RESET}{value}")

    return field
    return
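
A small sketch of the pycountry lookups this check relies on; get() returns None for unknown codes:

from pycountry import languages

assert languages.get(alpha_2="en") is not None   # valid ISO 639-1 code
assert languages.get(alpha_3="spa") is not None  # valid ISO 639-3 code
assert languages.get(alpha_2="jp") is None       # Japanese is "ja", not "jp"
assert languages.get(alpha_3="chi") is None      # ISO 639-3 uses "zho" for Chinese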


def agrovoc(field, field_name):
def agrovoc(field, field_name, drop):
    """Check subject terms against AGROVOC REST API.

    Function constructor expects the field as well as the field name because
@@ -211,35 +197,44 @@ def agrovoc(field, field_name):
    Prints a warning if the value is invalid.
    """

    from datetime import timedelta
    import requests
    import requests_cache

    # Skip fields with missing values
    if pd.isna(field):
        return

    # Initialize an empty list to hold the validated AGROVOC values
    values = []

    # Try to split multi-value field on "||" separator
    for value in field.split('||'):
        request_url = f'http://agrovoc.uniroma2.it/agrovoc/rest/v1/agrovoc/search?query={value}'
    for value in field.split("||"):
        request_url = "https://agrovoc.uniroma2.it/agrovoc/rest/v1/agrovoc/search"
        request_params = {"query": value}

        # enable transparent request cache with thirty days expiry
        expire_after = timedelta(days=30)
        requests_cache.install_cache('agrovoc-response-cache', expire_after=expire_after)

        request = requests.get(request_url)

        # prune old cache entries
        requests_cache.core.remove_expired_responses()
        request = requests.get(request_url, params=request_params)

        if request.status_code == requests.codes.ok:
            data = request.json()

            # check if there are any results
            if len(data['results']) == 0:
                print(f'Invalid AGROVOC ({field_name}): {value}')
            if len(data["results"]) == 0:
                if drop:
                    print(
                        f"{Fore.GREEN}Dropping invalid AGROVOC ({field_name}): {Fore.RESET}{value}"
                    )
                else:
                    print(
                        f"{Fore.RED}Invalid AGROVOC ({field_name}): {Fore.RESET}{value}"
                    )

    return field
                    # value is invalid AGROVOC, but we are not dropping
                    values.append(value)
            else:
                # value is valid AGROVOC so save it
                values.append(value)

    # Create a new field consisting of all values joined with "||"
    new_field = "||".join(values)

    return new_field
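
A one-off sketch of the AGROVOC REST API call made above, using an arbitrary subject term:

import requests

# Unauthenticated lookup against the same endpoint used above
response = requests.get(
    "https://agrovoc.uniroma2.it/agrovoc/rest/v1/agrovoc/search",
    params={"query": "maize"},
)
data = response.json()

# An empty "results" list means the term is not an AGROVOC concept
print(len(data["results"]) > 0)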


def filename_extension(field):
@@ -253,26 +248,37 @@ def filename_extension(field):
    than .pdf, .xls(x), .doc(x), .ppt(x), case insensitive).
    """

    import re

    # Skip fields with missing values
    if pd.isna(field):
        return

    # Try to split multi-value field on "||" separator
    values = field.split('||')
    values = field.split("||")

    # List of common filename extensions
    common_filename_extensions = ['.pdf', '.doc', '.docx', '.ppt', '.pptx', '.xls', '.xlsx']
    common_filename_extensions = [
        ".pdf",
        ".doc",
        ".docx",
        ".ppt",
        ".pptx",
        ".xls",
        ".xlsx",
    ]

    # Iterate over all values
    for value in values:
        # Strip filename descriptions that are meant for SAF Bundler, for
        # example: Annual_Report_2020.pdf__description:Report
        if "__description" in value:
            value = value.split("__")[0]

        # Assume filename extension does not match
        filename_extension_match = False

        for filename_extension in common_filename_extensions:
            # Check for extension at the end of the filename
            pattern = re.escape(filename_extension) + r'$'
            pattern = re.escape(filename_extension) + r"$"
            match = re.search(pattern, value, re.IGNORECASE)

            if match is not None:
@@ -282,6 +288,273 @@ def filename_extension(field):
                break

        if filename_extension_match is False:
            print(f'Filename with uncommon extension: {value}')
            print(f"{Fore.YELLOW}Filename with uncommon extension: {Fore.RESET}{value}")

    return field
    return


def spdx_license_identifier(field):
    """Check if a license is a valid SPDX identifier.

    Prints the value if it is invalid.
    """

    # List of common non-SPDX licenses to ignore
    # See: https://ilri.github.io/cgspace-submission-guidelines/dcterms-license/dcterms-license.txt
    ignore_licenses = {
        "All rights reserved; no re-use allowed",
        "All rights reserved; self-archive copy only",
        "Copyrighted; Non-commercial educational use only",
        "Copyrighted; Non-commercial use only",
        "Copyrighted; all rights reserved",
        "Other",
    }

    # Skip fields with missing values
    if pd.isna(field) or field in ignore_licenses:
        return

    spdx_licenses = load_spdx_licenses()

    # Try to split multi-value field on "||" separator
    for value in field.split("||"):
        if value not in spdx_licenses:
            print(f"{Fore.YELLOW}Non-SPDX license identifier: {Fore.RESET}{value}")

    return
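
A quick sketch of how the SPDX list from util.py feeds the membership test above; "CC-BY" is the invalid example from the test data:

from csv_metadata_quality.util import load_spdx_licenses

spdx_licenses = load_spdx_licenses()

print("CC-BY-4.0" in spdx_licenses)  # True, a real SPDX short identifier
print("CC-BY" in spdx_licenses)      # False, so the check above would warn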


def duplicate_items(df):
    """Attempt to identify duplicate items.

    First we check the total number of titles and compare it with the number of
    unique titles. If there are fewer unique titles than total titles we expand
    the search by creating a key (of sorts) for each item that includes its
    title, type, and date issued, and compare it with all the others. If there
    are multiple occurrences of the same title, type, date string then it's a
    very good indicator that the items are duplicates.
    """

    # Extract the names of the title, type, and date issued columns so we can
    # reference them later. First we filter columns by likely patterns, then
    # we extract the name from the first item of the resulting object, ie:
    #
    # Index(['dcterms.title[en_US]'], dtype='object')
    #
    # But, we need to consider that dc.title.alternative might come before the
    # main title in the CSV, so use a negative lookahead to eliminate that.
    #
    # See: https://regex101.com/r/elyXkW/1
    title_column_name = df.filter(
        regex=r"^(dc|dcterms)\.title(?!\.alternative).*$"
    ).columns[0]
    type_column_name = df.filter(regex=r"^(dcterms\.type|dc\.type).*$").columns[0]
    date_column_name = df.filter(
        regex=r"^(dcterms\.issued|dc\.date\.accessioned).*$"
    ).columns[0]

    items_count_total = df[title_column_name].count()
    items_count_unique = df[title_column_name].nunique()

    if items_count_unique < items_count_total:
        # Create a list to hold our items while we check for duplicates
        items = []

        for index, row in df.iterrows():
            item_title_type_date = f"{row[title_column_name]}{row[type_column_name]}{row[date_column_name]}"

            if item_title_type_date in items:
                print(
                    f"{Fore.YELLOW}Possible duplicate ({title_column_name}): {Fore.RESET}{row[title_column_name]}"
                )
            else:
                items.append(item_title_type_date)
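
The composite key above is plain string concatenation, so two items collide only when title, type, and issue date are all identical. A condensed pandas sketch of the same idea, with assumed column names:

import pandas as pd

df = pd.DataFrame(
    {
        "dcterms.title": ["Duplicate Title", "Duplicate Title", "Other title"],
        "dcterms.type": ["Report", "Report", "Report"],
        "dcterms.issued": ["2021-03-17", "2021-03-17", "2021-03-17"],
    }
)

# Concatenate the three columns into one key per item; a repeated key is a
# possible duplicate, just like the loop above
keys = df["dcterms.title"] + df["dcterms.type"] + df["dcterms.issued"]
print(df.loc[keys.duplicated(), "dcterms.title"])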


def mojibake(field, field_name):
    """Check for mojibake (text that was encoded in one encoding and decoded
    in another, perhaps multiple times). See util.py.

    Prints the string if it contains suspected mojibake.
    """

    # Skip fields with missing values
    if pd.isna(field):
        return

    if is_mojibake(field):
        print(
            f"{Fore.YELLOW}Possible encoding issue ({field_name}): {Fore.RESET}{field}"
        )

    return


def citation_doi(row, exclude):
    """Check for the scenario where an item has a DOI listed in its citation,
    but does not have a cg.identifier.doi field.

    Function prints a warning if the DOI field is missing, but there is a DOI
    in the citation.
    """
    # Check if the user requested us to skip any DOI fields so we can
    # just return before going any further.
    for field in exclude:
        match = re.match(r"^.*?doi.*$", field)
        if match is not None:
            return

    # Initialize some variables at global scope so that we can set them in the
    # loop scope below and still be able to access them afterwards.
    citation = ""

    # Iterate over the labels of the current row's values to check if a DOI
    # exists. If not, then we extract the citation to see if there is a DOI
    # listed there.
    for label in row.axes[0]:
        # Skip fields with missing values
        if pd.isna(row[label]):
            continue

        # If a DOI field exists we don't need to check the citation
        match = re.match(r"^.*?doi.*$", label)
        if match is not None:
            return

        # Check if the current label is a citation field and make sure the user
        # hasn't asked to skip it. If not, then set the citation.
        match = re.match(r"^.*?[cC]itation.*$", label)
        if match is not None and label not in exclude:
            citation = row[label]

    if citation != "":
        # Check the citation for "doi: 10.1186/1743-422X-9-218"
        doi_match1 = re.match(r"^.*?doi:\s.*$", citation)
        # Check the citation for a DOI URL (doi.org, dx.doi.org, etc)
        doi_match2 = re.match(r"^.*?doi\.org.*$", citation)
        if doi_match1 is not None or doi_match2 is not None:
            print(
                f"{Fore.YELLOW}DOI in citation, but missing a DOI field: {Fore.RESET}{citation}"
            )

    return


def title_in_citation(row, exclude):
    """Check for the scenario where an item's title is missing from its cita-
    tion. This could mean that it is missing entirely, or perhaps just exists
    in a different format (whitespace, accents, etc).

    Function prints a warning if the title does not appear in the citation.
    """
    # Initialize some variables at global scope so that we can set them in the
    # loop scope below and still be able to access them afterwards.
    title = ""
    citation = ""

    # Iterate over the labels of the current row's values to get the names of
    # the title and citation columns. Then we check if the title is present in
    # the citation.
    for label in row.axes[0]:
        # Skip fields with missing values
        if pd.isna(row[label]):
            continue

        # Find the name of the title column
        match = re.match(r"^(dc|dcterms)\.title.*$", label)
        if match is not None and label not in exclude:
            title = row[label]

        # Find the name of the citation column
        match = re.match(r"^.*?[cC]itation.*$", label)
        if match is not None and label not in exclude:
            citation = row[label]

    if citation != "":
        if title not in citation:
            print(f"{Fore.YELLOW}Title is not present in citation: {Fore.RESET}{title}")

    return


def countries_match_regions(row, exclude):
    """Check for the scenario where an item has country coverage metadata, but
    does not have the corresponding region metadata. For example, an item that
    has country coverage "Kenya" should also have region "Eastern Africa" acc-
    ording to the UN M.49 classification scheme.

    See: https://unstats.un.org/unsd/methodology/m49/

    Function prints a warning if the appropriate region is not present.
    """
    # Initialize some variables at global scope so that we can set them in the
    # loop scope below and still be able to access them afterwards.
    country_column_name = ""
    region_column_name = ""
    title_column_name = ""

    # Instantiate a CountryConverter() object here. According to the docs it is
    # more performant to do that as opposed to calling coco.convert() directly
    # because we don't need to re-load the country data with each iteration.
    cc = coco.CountryConverter()

    # Set logging to ERROR so country_converter's convert() doesn't print the
    # "not found in regex" warning message to the screen.
    logging.basicConfig(level=logging.ERROR)

    # Iterate over the labels of the current row's values to get the names of
    # the country, region, and title columns.
    for label in row.axes[0]:
        # Find the name of the country column
        match = re.match(r"^.*?country.*$", label)
        if match is not None:
            country_column_name = label

        # Find the name of the region column, but make sure it's not subregion!
        match = re.match(r"^.*?region.*$", label)
        if match is not None and "sub" not in label:
            region_column_name = label

        # Find the name of the title column
        match = re.match(r"^(dc|dcterms)\.title.*$", label)
        if match is not None:
            title_column_name = label

    # Make sure the user has not asked to exclude any metadata fields. If so, we
    # should return immediately.
    column_names = [country_column_name, region_column_name, title_column_name]
    if any(field in column_names for field in exclude):
        return

    # Make sure we found the country and region columns
    if country_column_name != "" and region_column_name != "":
        # If we don't have any countries then we should return early before
        # suggesting regions.
        if row[country_column_name] is not None:
            countries = row[country_column_name].split("||")
        else:
            return

        if row[region_column_name] is not None:
            regions = row[region_column_name].split("||")
        else:
            regions = []

        for country in countries:
            # Look up the UN M.49 regions for this country code. CoCo seems to
            # only list the direct region, ie Western Africa, rather than all
            # the parent regions ("Sub-Saharan Africa", "Africa", "World")
            un_region = cc.convert(names=country, to="UNRegion")

            if un_region != "not found" and un_region not in regions:
                try:
                    print(
                        f"{Fore.YELLOW}Missing region ({country} → {un_region}): {Fore.RESET}{row[title_column_name]}"
                    )
                except KeyError:
                    print(
                        f"{Fore.YELLOW}Missing region ({country} → {un_region}): {Fore.RESET}<title field not present>"
                    )

    return

csv_metadata_quality/data/licenses.json (new file, 8009 lines)
File diff suppressed because it is too large

csv_metadata_quality/experimental.py (new file, 99 lines)
@@ -0,0 +1,99 @@
# SPDX-License-Identifier: GPL-3.0-only

import re

import pandas as pd
import py3langid as langid
from colorama import Fore
from pycountry import languages


def correct_language(row, exclude):
    """Analyze the text used in the title, abstract, and citation fields to pre-
    dict the language being used and compare it with the item's dc.language.iso
    field.

    Function prints an error if the language field does not match the detected
    language and returns the value in the language field if it does match.
    """

    # Initialize some variables at global scope so that we can set them in the
    # loop scope below and still be able to access them afterwards.
    language = ""
    sample_strings = []
    title = None

    # Iterate over the labels of the current row's values. Before we transposed
    # the DataFrame these were the columns in the CSV, ie dc.title and dc.type.
    for label in row.axes[0]:
        # Skip fields with missing values
        if pd.isna(row[label]):
            continue

        # Check if current row has multiple language values (separated by "||")
        match = re.match(r"^.*?language.*$", label)
        if match is not None:
            # Skip fields with multiple language values
            if "||" in row[label]:
                return

            language = row[label]

        # Extract title if it is present (note that we don't allow excluding
        # the title here because it complicates things).
        match = re.match(r"^.*?title.*$", label)
        if match is not None:
            title = row[label]
            # Append title to sample strings
            sample_strings.append(row[label])

        # Extract abstract if it is present
        match = re.match(r"^.*?abstract.*$", label)
        if match is not None and label not in exclude:
            sample_strings.append(row[label])

        # Extract citation if it is present
        match = re.match(r"^.*?[cC]itation.*$", label)
        if match is not None and label not in exclude:
            sample_strings.append(row[label])

    # Make sure language is not blank and is valid ISO 639-1/639-3 before
    # proceeding with language prediction
    if language != "":
        # Check language value like "es"
        if len(language) == 2:
            if not languages.get(alpha_2=language):
                return
        # Check language value like "spa"
        elif len(language) == 3:
            if not languages.get(alpha_3=language):
                return
        # Language value is something else like "Span", do not proceed
        else:
            return
    # Language is blank, do not proceed
    else:
        return

    # Concatenate all sample strings into one string
    sample_text = " ".join(sample_strings)

    # Restrict the langid detection space to reduce false positives
    langid.set_languages(
        ["ar", "de", "en", "es", "fr", "hi", "it", "ja", "ko", "pt", "ru", "vi", "zh"]
    )
    langid_classification = langid.classify(sample_text)

    # langid returns an ISO 639-1 (alpha 2) representation of the detected
    # language, but the current item's language field might be ISO 639-3
    # (alpha 3) so we should use a pycountry Language object to compare both
    # representations and give appropriate error messages that match the
    # format used in the input file.
    detected_language = languages.get(alpha_2=langid_classification[0])
    if len(language) == 2 and language != detected_language.alpha_2:
        print(
            f"{Fore.YELLOW}Possibly incorrect language {language} (detected {detected_language.alpha_2}): {Fore.RESET}{title}"
        )

    elif len(language) == 3 and language != detected_language.alpha_3:
        print(
            f"{Fore.YELLOW}Possibly incorrect language {language} (detected {detected_language.alpha_3}): {Fore.RESET}{title}"
        )

    else:
        return
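
A minimal sketch of the py3langid calls used above, with a made-up Spanish sample string:

import py3langid as langid

# Constrain the model before classifying, as the function above does
langid.set_languages(["en", "es", "fr"])

# classify() returns a (language, score) tuple with an ISO 639-1 code
lang, score = langid.classify("Esta es una prueba de detección de idioma")
print(lang)  # expected: "es"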

@@ -1,8 +1,18 @@
import pandas as pd
# SPDX-License-Identifier: GPL-3.0-only

import logging
import re
from unicodedata import normalize

import country_converter as coco
import pandas as pd
from colorama import Fore
from ftfy import TextFixerConfig, fix_text

from csv_metadata_quality.util import is_mojibake, is_nfc


def whitespace(field):
def whitespace(field, field_name):
    """Fix whitespace issues.

    Return string with leading, trailing, and consecutive whitespace trimmed.
@@ -13,56 +23,75 @@ def whitespace(field):
        return

    # Initialize an empty list to hold the cleaned values
    values = list()
    values = []

    # Try to split multi-value field on "||" separator
    for value in field.split('||'):
    for value in field.split("||"):
        # Strip leading and trailing whitespace
        value = value.strip()

        # Replace excessive whitespace (2 or more) with one space
        pattern = re.compile(r'\s{2,}')
        pattern = re.compile(r"\s{2,}")
        match = re.findall(pattern, value)

        if match:
            print(f'Excessive whitespace: {value}')
            value = re.sub(pattern, ' ', value)
            print(
                f"{Fore.GREEN}Removing excessive whitespace ({field_name}): {Fore.RESET}{value}"
            )
            value = re.sub(pattern, " ", value)

        # Save cleaned value
        values.append(value)

    # Create a new field consisting of all values joined with "||"
    new_field = '||'.join(values)
    new_field = "||".join(values)

    return new_field


def separators(field):
    """Fix for invalid multi-value separators (ie "|")."""
def separators(field, field_name):
    """Fix for invalid and unnecessary multi-value separators, for example:

    value|value
    value|||value
    value||value||

    Prints the field with the invalid multi-value separator.
    """

    # Skip fields with missing values
    if pd.isna(field):
        return

    # Initialize an empty list to hold the cleaned values
    values = list()
    values = []

    # Try to split multi-value field on "||" separator
    for value in field.split('||'):
    for value in field.split("||"):
        # Check if the value is blank and skip it
        if value == "":
            print(
                f"{Fore.GREEN}Fixing unnecessary multi-value separator ({field_name}): {Fore.RESET}{field}"
            )

            continue

        # After splitting, see if there are any remaining "|" characters
        pattern = re.compile(r'\|')
        pattern = re.compile(r"\|")
        match = re.findall(pattern, value)

        if match:
            print(f'Fixing invalid multi-value separator: {value}')
            print(
                f"{Fore.GREEN}Fixing invalid multi-value separator ({field_name}): {Fore.RESET}{value}"
            )

            value = re.sub(pattern, '||', value)
            value = re.sub(pattern, "||", value)

        # Save cleaned value
        values.append(value)

    # Create a new field consisting of all values joined with "||"
    new_field = '||'.join(values)
    new_field = "||".join(values)

    return new_field
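
Two hypothetical before/after values showing what the branches above catch, using the function's public behavior:

from csv_metadata_quality import fix

# A single "|" is promoted to the proper "||" separator
print(fix.separators("Alan|Orth", field_name="dc.contributor.author"))  # Alan||Orth

# A trailing separator produces a blank value, which is dropped
print(fix.separators("0378-5955||", field_name="dc.identifier.issn"))   # 0378-5955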

@@ -73,10 +102,11 @@ def unnecessary_unicode(field):
    Removes unnecessary Unicode characters like:
        - Zero-width space (U+200B)
        - Replacement character (U+FFFD)
        - No-break space (U+00A0)

    Replaces unnecessary Unicode characters like:
        - Soft hyphen (U+00AD) → hyphen
        - No-break space (U+00A0) → space
        - Thin space (U+2009) → space

    Return string with characters removed or replaced.
    """
@@ -86,41 +116,55 @@ def unnecessary_unicode(field):
        return

    # Check for zero-width space characters (U+200B)
    pattern = re.compile(r'\u200B')
    pattern = re.compile(r"\u200B")
    match = re.findall(pattern, field)

    if match:
        print(f'Removing unnecessary Unicode (U+200B): {field}')
        field = re.sub(pattern, '', field)
        print(f"{Fore.GREEN}Removing unnecessary Unicode (U+200B): {Fore.RESET}{field}")
        field = re.sub(pattern, "", field)

    # Check for replacement characters (U+FFFD)
    pattern = re.compile(r'\uFFFD')
    pattern = re.compile(r"\uFFFD")
    match = re.findall(pattern, field)

    if match:
        print(f'Removing unnecessary Unicode (U+FFFD): {field}')
        field = re.sub(pattern, '', field)
        print(f"{Fore.GREEN}Removing unnecessary Unicode (U+FFFD): {Fore.RESET}{field}")
        field = re.sub(pattern, "", field)

    # Check for no-break spaces (U+00A0)
    pattern = re.compile(r'\u00A0')
    pattern = re.compile(r"\u00A0")
    match = re.findall(pattern, field)

    if match:
        print(f'Removing unnecessary Unicode (U+00A0): {field}')
        field = re.sub(pattern, '', field)
        print(
            f"{Fore.GREEN}Replacing unnecessary Unicode (U+00A0): {Fore.RESET}{field}"
        )
        field = re.sub(pattern, " ", field)

    # Check for soft hyphens (U+00AD), sometimes preceded with a normal hyphen
    pattern = re.compile(r'\u002D*?\u00AD')
    pattern = re.compile(r"\u002D*?\u00AD")
    match = re.findall(pattern, field)

    if match:
        print(f'Replacing unnecessary Unicode (U+00AD): {field}')
        field = re.sub(pattern, '-', field)
        print(
            f"{Fore.GREEN}Replacing unnecessary Unicode (U+00AD): {Fore.RESET}{field}"
        )
        field = re.sub(pattern, "-", field)

    # Check for thin spaces (U+2009)
    pattern = re.compile(r"\u2009")
    match = re.findall(pattern, field)

    if match:
        print(
            f"{Fore.GREEN}Replacing unnecessary Unicode (U+2009): {Fore.RESET}{field}"
        )
        field = re.sub(pattern, " ", field)

    return field


def duplicates(field):
def duplicates(field, field_name):
    """Remove duplicate metadata values."""

    # Skip fields with missing values
@@ -128,10 +172,10 @@ def duplicates(field):
        return

    # Try to split multi-value field on "||" separator
    values = field.split('||')
    values = field.split("||")

    # Initialize an empty list to hold the de-duplicated values
    new_values = list()
    new_values = []

    # Iterate over all values
    for value in values:
@@ -139,15 +183,17 @@ def duplicates(field):
        if value not in new_values:
            new_values.append(value)
        else:
            print(f'Dropping duplicate value: {value}')
            print(
                f"{Fore.GREEN}Removing duplicate value ({field_name}): {Fore.RESET}{value}"
            )

    # Create a new field consisting of all values joined with "||"
    new_field = '||'.join(new_values)
    new_field = "||".join(new_values)

    return new_field


def newlines(field):
def newlines(field, field_name):
    """Fix newlines.

    Single metadata values should not span multiple lines because this is not
@@ -169,11 +215,11 @@ def newlines(field):
        return

    # Check for Unix line feed (LF)
    match = re.findall(r'\n', field)
    match = re.findall(r"\n", field)

    if match:
        print(f'Removing newline: {field}')
        field = field.replace('\n', '')
        print(f"{Fore.GREEN}Removing newline ({field_name}): {Fore.RESET}{field}")
        field = field.replace("\n", "")

    return field

@@ -193,10 +239,230 @@ def comma_space(field, field_name):
        return

    # Check for comma followed by a word character
    match = re.findall(r',\w', field)
    match = re.findall(r",\w", field)

    if match:
        print(f'Adding space after comma ({field_name}): {field}')
        field = re.sub(r',(\w)', r', \1', field)
        print(
            f"{Fore.GREEN}Adding space after comma ({field_name}): {Fore.RESET}{field}"
        )
        field = re.sub(r",(\w)", r", \1", field)

    return field


def normalize_unicode(field, field_name):
    """Fix occurrences of decomposed Unicode characters by normalizing them
    with NFC to their canonical forms, for example:

    Ouédraogo, Mathieu → Ouédraogo, Mathieu

    Return normalized string.
    """

    # Skip fields with missing values
    if pd.isna(field):
        return

    # Check if the current string is using normalized Unicode (NFC)
    if not is_nfc(field):
        print(f"{Fore.GREEN}Normalizing Unicode ({field_name}): {Fore.RESET}{field}")
        field = normalize("NFC", field)

    return field


def mojibake(field, field_name):
    """Attempts to fix mojibake (text that was encoded in one encoding and deco-
    ded in another, perhaps multiple times). See util.py.

    Return fixed string.
    """

    # Skip fields with missing values
    if pd.isna(field):
        return field

    # We don't want ftfy to change “smart quotes” to "ASCII quotes"
    config = TextFixerConfig(uncurl_quotes=False)

    if is_mojibake(field):
        print(f"{Fore.GREEN}Fixing encoding issue ({field_name}): {Fore.RESET}{field}")

        return fix_text(field, config)
    else:
        return field


def countries_match_regions(row, exclude):
    """Check for the scenario where an item has country coverage metadata, but
    does not have the corresponding region metadata. For example, an item that
    has country coverage "Kenya" should also have region "Eastern Africa" acc-
    ording to the UN M.49 classification scheme.

    See: https://unstats.un.org/unsd/methodology/m49/

    Return the fixed row.
    """
    # Initialize some variables at global scope so that we can set them in the
    # loop scope below and still be able to access them afterwards.
    country_column_name = ""
    region_column_name = ""
    title_column_name = ""

    # Instantiate a CountryConverter() object here. According to the docs it is
    # more performant to do that as opposed to calling coco.convert() directly
    # because we don't need to re-load the country data with each iteration.
    cc = coco.CountryConverter()

    # Set logging to ERROR so country_converter's convert() doesn't print the
    # "not found in regex" warning message to the screen.
    logging.basicConfig(level=logging.ERROR)

    # Iterate over the labels of the current row's values to get the names of
    # the country, region, and title columns.
    for label in row.axes[0]:
        # Find the name of the country column
        match = re.match(r"^.*?country.*$", label)
        if match is not None:
            country_column_name = label

        # Find the name of the region column, but make sure it's not subregion!
        match = re.match(r"^.*?region.*$", label)
        if match is not None and "sub" not in label:
            region_column_name = label

        # Find the name of the title column
        match = re.match(r"^(dc|dcterms)\.title.*$", label)
        if match is not None:
            title_column_name = label

    # Make sure the user has not asked to exclude any metadata fields. If so, we
    # should return immediately.
    column_names = [country_column_name, region_column_name, title_column_name]
    if any(field in column_names for field in exclude):
        return row

    # Make sure we found the country and region columns
    if country_column_name != "" and region_column_name != "":
        # If we don't have any countries then we should return early before
        # suggesting regions.
        if row[country_column_name] is not None:
            countries = row[country_column_name].split("||")
        else:
            return row

        if row[region_column_name] is not None:
            regions = row[region_column_name].split("||")
        else:
            regions = []

        # An empty list for our regions so we can keep track across all countries
        missing_regions = []

        for country in countries:
            # Look up the UN M.49 regions for this country code. CoCo seems to
            # only list the direct region, ie Western Africa, rather than all
            # the parent regions ("Sub-Saharan Africa", "Africa", "World")
            un_region = cc.convert(names=country, to="UNRegion")

            # Add the new un_region to regions if it is not "not found" and if
            # it doesn't already exist in regions.
            if un_region != "not found" and un_region not in regions:
                if un_region not in missing_regions:
                    try:
                        print(
                            f"{Fore.YELLOW}Adding missing region ({un_region}): {Fore.RESET}{row[title_column_name]}"
                        )
                    except KeyError:
                        # If there is no title column in the CSV we will print
                        # the fix without the title instead of crashing.
                        print(
                            f"{Fore.YELLOW}Adding missing region ({un_region}): {Fore.RESET}<title field not present>"
                        )

                    missing_regions.append(un_region)

        if len(missing_regions) > 0:
            # Add the missing regions back to the row, paying attention to whether
            # or not the row's region column is None (aka null) or just an empty
            # string (length would be 0).
            if row[region_column_name] is not None and len(row[region_column_name]) > 0:
                row[region_column_name] = (
                    row[region_column_name] + "||" + "||".join(missing_regions)
                )
            else:
                row[region_column_name] = "||".join(missing_regions)

    return row
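
A minimal sketch of the country_converter lookup used in both the check and the fix; Kenya and the fake country come from data/test-geography.csv:

import country_converter as coco

cc = coco.CountryConverter()

# convert() maps a country name to its direct UN M.49 region
print(cc.convert(names="Kenya", to="UNRegion"))      # Eastern Africa
print(cc.convert(names="Yeah Baby", to="UNRegion"))  # not found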


def normalize_dois(field):
    """Normalize DOIs.

    DOIs are meant to be globally unique identifiers. They are case insensitive,
    but in order to compare them robustly they should be normalized to a common
    format:

        - strip leading and trailing whitespace
        - lowercase all ASCII characters
        - convert all variations to https://doi.org/10.xxxx/xxxx URI format

    Return string with normalized DOI.

    See: https://www.crossref.org/documentation/member-setup/constructing-your-dois/
    """

    # Skip fields with missing values
    if pd.isna(field):
        return

    # Try to split multi-value field on "||" separator
    values = field.split("||")

    # Initialize an empty list to hold the normalized values
    new_values = []

    # Iterate over all values (most items will only have one DOI)
    for value in values:
        # Strip leading and trailing whitespace
        new_value = value.strip()

        new_value = new_value.lower()

        # Convert to HTTPS
        pattern = re.compile(r"^http://")
        match = re.findall(pattern, new_value)

        if match:
            new_value = re.sub(pattern, "https://", new_value)

        # Convert dx.doi.org to doi.org
        pattern = re.compile(r"dx\.doi\.org")
        match = re.findall(pattern, new_value)

        if match:
            new_value = re.sub(pattern, "doi.org", new_value)

        # Replace values like doi: 10.11648/j.jps.20140201.14
        pattern = re.compile(r"^doi: 10\.")
        match = re.findall(pattern, new_value)

        if match:
            new_value = re.sub(pattern, "https://doi.org/10.", new_value)

        # Replace values like 10.3390/foods12010115
        pattern = re.compile(r"^10\.")
        match = re.findall(pattern, new_value)

        if match:
            new_value = re.sub(pattern, "https://doi.org/10.", new_value)

        if new_value != value:
            print(f"{Fore.GREEN}Normalized DOI: {Fore.RESET}{value}")

        new_values.append(new_value)

    new_field = "||".join(new_values)

    return new_field
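
Usage sketch against the three DOI variants from data/test.csv; each collapses to the lowercased https://doi.org/... form:

from csv_metadata_quality import fix

print(fix.normalize_dois("http://dx.doi.org/10.1016/j.envc.2023.100794"))
print(fix.normalize_dois("doi: 10.11648/j.jps.20140201.14"))
print(fix.normalize_dois("10.19103/AS.2018.0043.16"))
# all three print an https://doi.org/10.... URI, lowercased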

csv_metadata_quality/util.py (new file, 65 lines)
@@ -0,0 +1,65 @@
# SPDX-License-Identifier: GPL-3.0-only


import json
import os

from ftfy.badness import is_bad


def is_nfc(field):
    """Utility function to check whether a string is using normalized Unicode.
    Python's built-in unicodedata library has the is_normalized() function, but
    it was only introduced in Python 3.8. By using a simple utility function we
    are able to run on Python >= 3.6 again.

    See: https://docs.python.org/3/library/unicodedata.html

    Return boolean.
    """

    from unicodedata import normalize

    return field == normalize("NFC", field)
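
A short sketch of the NFC comparison, using a decomposed and a precomposed spelling of the same name:

from unicodedata import normalize

# "é" as a decomposed pair (e + combining acute) versus the precomposed character
decomposed = "Oue\u0301draogo"
composed = "Ou\u00e9draogo"

assert decomposed != composed                    # different code points
assert normalize("NFC", decomposed) == composed  # NFC folds them together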


def is_mojibake(field):
    """Determines whether a string contains mojibake.

    We commonly deal with CSV files that were *encoded* in UTF-8, but decoded
    as something else like CP-1252 (Windows Latin). This manifests in the form
    of "mojibake", for example:

        - CIAT PublicaÃ§ao
        - CIAT PublicaciÃ³n

    This uses the excellent "fixes text for you" (ftfy) library to determine
    whether a string contains characters that have been encoded in one encoding
    and decoded in another.

    Inspired by this code snippet from Martijn Pieters on StackOverflow:
    https://stackoverflow.com/questions/29071995/identify-garbage-unicode-string-using-python

    Return boolean.
    """
    if not is_bad(field):
        # Nothing weird, should be okay
        return False
    try:
        field.encode("sloppy-windows-1252")
    except UnicodeEncodeError:
        # Not CP-1252 encodable, probably fine
        return False
    else:
        # Encodable as CP-1252, Mojibake alert level high
        return True
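
A sketch of the ftfy calls this helper builds on, using one of the docstring's mojibake examples:

from ftfy import fix_text
from ftfy.badness import is_bad

broken = "CIAT PublicaciÃ³n"  # UTF-8 bytes that were decoded as CP-1252

assert is_bad(broken)
print(fix_text(broken))  # CIAT Publicación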


def load_spdx_licenses():
    """Returns a Python list of SPDX short license identifiers."""

    with open(os.path.join(os.path.dirname(__file__), "data/licenses.json")) as f:
        licenses = json.load(f)

    # List comprehension to extract the license ID for each license
    return [license["licenseId"] for license in licenses["licenses"]]

@@ -1 +1,3 @@
VERSION = '0.2.2'
# SPDX-License-Identifier: GPL-3.0-only

VERSION = "0.6.1"

data/abstract-check.csv (new file, 17 lines)
@@ -0,0 +1,17 @@
id,dc.title,dcterms.abstract
1,Normal item,This is an abstract
2,Leading whitespace, This is an abstract
3,Trailing whitespace,This is an abstract
4,Consecutive whitespace,This is an abstract
5,Newline,"This
is an abstract"
6,Newline with leading whitespace," This
is an abstract"
7,Newline with trailing whitespace,"This
is an abstract "
8,Newline with consecutive whitespace,"This
is an abstract"
9,Multiple newlines,"This
is
an
abstract"

data/test-geography.csv (new file, 13 lines)
@@ -0,0 +1,13 @@
dc.title,dcterms.issued,dcterms.type,dc.contributor.author,cg.coverage.country,cg.coverage.region
No country,2022-09-01,Report,"Orth, Alan",,
Matching country and region,2022-09-01,Report,"Orth, Alan",Kenya,Eastern Africa
Missing region,2022-09-01,Report,"Orth, Alan",Kenya,
Caribbean country with matching region,2022-09-01,Report,"Orth, Alan",Bahamas,Caribbean
Caribbean country with no region,2022-09-01,Report,"Orth, Alan",Bahamas,
Fake country with no region,2022-09-01,Report,"Orth, Alan",Yeah Baby,
SE Asian country with matching region,2022-09-01,Report,"Orth, Alan",Cambodia,South-eastern Asia
SE Asian country with no region,2022-09-01,Report,"Orth, Alan",Cambodia,
Duplicate countries with matching region,2022-09-01,Report,"Orth, Alan",Kenya||Kenya,Eastern Africa
Duplicate countries with missing regions,2022-09-01,Report,"Orth, Alan",Kenya||Kenya,
Multiple countries with no regions,2022-09-01,Report,"Orth, Alan",Kenya||Bahamas,
Multiple countries with mixed matching regions,2022-09-01,Report,"Orth, Alan",Kenya||Bahamas,Eastern Africa

@@ -1,26 +1,42 @@
dc.contributor.author,birthdate,dc.identifier.issn,dc.identifier.isbn,dc.language.iso,dc.subject,cg.coverage.country,filename
Leading space,2019-07-29,,,,,,
Trailing space ,2019-07-29,,,,,,
Excessive space,2019-07-29,,,,,,
Miscellaenous ||whitespace | issues ,2019-07-29,,,,,,
Duplicate||Duplicate,2019-07-29,,,,,,
Invalid ISSN,2019-07-29,2321-2302,,,,,
Invalid ISBN,2019-07-29,,978-0-306-40615-6,,,,
Multiple valid ISSNs,2019-07-29,0378-5955||0024-9319,,,,,
Multiple valid ISBNs,2019-07-29,,99921-58-10-7||978-0-306-40615-7,,,,
Invalid date,2019-07-260,,,,,,
Multiple dates,2019-07-26||2019-01-10,,,,,,
Invalid multi-value separator,2019-07-29,0378-5955|0024-9319,,,,,
Unnecessary Unicode,2019-07-29,,,,,,
Suspicious character||foreˆt,2019-07-29,,,,,,
Invalid ISO 639-2 language,2019-07-29,,,jp,,,
Invalid ISO 639-3 language,2019-07-29,,,chi,,,
Invalid language,2019-07-29,,,Span,,,
Invalid AGROVOC subject,2019-07-29,,,,FOREST,,
dc.title,dcterms.issued,dc.identifier.issn,dc.identifier.isbn,dcterms.language,dcterms.subject,cg.coverage.country,filename,dcterms.license,dcterms.type,dcterms.bibliographicCitation,cg.identifier.doi,cg.coverage.region,cg.coverage.subregion
Leading space,2019-07-29,,,,,,,,,,,,
Trailing space ,2019-07-29,,,,,,,,,,,,
Excessive space,2019-07-29,,,,,,,,,,,,
Miscellaenous ||whitespace | issues ,2019-07-29,,,,,,,,,,,,
Duplicate||Duplicate,2019-07-29,,,,,,,,,,,,
Invalid ISSN,2019-07-29,2321-2302,,,,,,,,,,,
Invalid ISBN,2019-07-29,,978-0-306-40615-6,,,,,,,,,,
Multiple valid ISSNs,2019-07-29,0378-5955||0024-9319,,,,,,,,,,,
Multiple valid ISBNs,2019-07-29,,99921-58-10-7||978-0-306-40615-7,,,,,,,,,,
Invalid date,2019-07-260,,,,,,,,,,,,
Multiple dates,2019-07-26||2019-01-10,,,,,,,,,,,,
Invalid multi-value separator,2019-07-29,0378-5955|0024-9319,,,,,,,,,,,
Unnecessary Unicode,2019-07-29,,,,,,,,,,,,
Suspicious character||foreˆt,2019-07-29,,,,,,,,,,,,
Invalid ISO 639-1 (alpha 2) language,2019-07-29,,,jp,,,,,,,,,
Invalid ISO 639-3 (alpha 3) language,2019-07-29,,,chi,,,,,,,,,
Invalid language,2019-07-29,,,Span,,,,,,,,,
Invalid AGROVOC subject,2019-07-29,,,,LIVESTOCK||FOREST,,,,,,,,
Newline (LF),2019-07-30,,,,"TANZA
NIA",,
Missing date,,,,,,,
Invalid country,2019-08-01,,,,,KENYAA,
Uncommon filename extension,2019-08-10,,,,,,file.pdf.lck
Unneccesary unicode (U+002D + U+00AD),2019-08-10,,978-92-9043-823-6,,,,
"Missing space,after comma",2019-08-27,,,,,,
NIA",,,,,,,,
Missing date,,,,,,,,,,,,,
Invalid country,2019-08-01,,,,,KENYAA,,,,,,,
Uncommon filename extension,2019-08-10,,,,,,file.pdf.lck,,,,,,
Unneccesary unicode (U+002D + U+00AD),2019-08-10,,978-92-9043-823-6,,,,,,,,,,
"Missing space,after comma",2019-08-27,,,,,,,,,,,,
Incorrect ISO 639-1 language,2019-09-26,,,es,,,,,,,,,
Incorrect ISO 639-3 language,2019-09-26,,,spa,,,,,,,,,
Composéd Unicode,2020-01-14,,,,,,,,,,,,
Decomposéd Unicode,2020-01-14,,,,,,,,,,,,
Unnecessary multi-value separator,2021-01-03,0378-5955||,,,,,,,,,,,
Invalid SPDX license identifier,2021-03-11,,,,,,,CC-BY,,,,,
Duplicate Title,2021-03-17,,,,,,,,Report,,,,
Duplicate Title,2021-03-17,,,,,,,,Report,,,,
Mojibake,2021-03-18,,,,PublicaÃ§ao CIAT,,,,Report,,,,
"DOI in citation, but missing cg.identifier.doi",2021-10-06,,,,,,,,,"Orth, A. 2021. DOI in citation, but missing cg.identifier.doi. doi: 10.1186/1743-422X-9-218",,,
Title missing from citation,2021-12-05,,,,,,,,,"Orth, A. 2021. Title missing f rom citation.",,,
Country missing region,2021-12-08,,,,,Kenya,,,,,,,
Subregion field shouldn’t trigger region checks,2022-12-07,,,,,Kenya,,,,,,Eastern Africa,Baringo
DOI with HTTP and dx.doi.org,2024-04-23,,,,,,,,,,http://dx.doi.org/10.1016/j.envc.2023.100794,,
DOI with colon,2024-04-23,,,,,,,,,,doi: 10.11648/j.jps.20140201.14,,
Upper case bare DOI,2024-04-23,,,,,,,,,,10.19103/AS.2018.0043.16,,

poetry.lock (generated, new file, 1903 lines)
File diff suppressed because it is too large

pyproject.toml (new file, 41 lines)
@@ -0,0 +1,41 @@
[tool.poetry]
name = "csv-metadata-quality"
version = "0.6.1"
description="A simple, but opinionated CSV quality checking and fixing pipeline for CSVs in the DSpace ecosystem."
authors = ["Alan Orth <alan.orth@gmail.com>"]
license="GPL-3.0-only"
repository = "https://github.com/ilri/csv-metadata-quality"
homepage = "https://github.com/ilri/csv-metadata-quality"

[tool.poetry.scripts]
csv-metadata-quality = 'csv_metadata_quality.__main__:main'

[tool.poetry.dependencies]
python = "^3.9"
pandas = {version = "^2.0.2", extras = ["feather", "performance"]}
python-stdnum = "^1.18"
requests = "^2.28.2"
requests-cache = "^1.0.0"
colorama = "^0.4.6"
ftfy = "^6.1.1"
country-converter = "~1.1.0"
pycountry = "^23.12.7"
py3langid = "^0.2.2"

[tool.poetry.group.dev.dependencies]
pytest = "^7.2.1"
flake8 = "^7.0.0"
pytest-clarity = "^1.0.1"
black = "^23.1.0"
isort = "^5.12.0"
csvkit = "^1.1.0"
ipython = "^8.10.0"
fixit = "^2.1.0"

[build-system]
requires = ["poetry>=0.12"]
build-backend = "poetry.masonry.api"

[tool.isort]
profile = "black"
line_length=88
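An aside on the [tool.poetry.scripts] entry above: the installed csv-metadata-quality command resolves to the main() function in csv_metadata_quality/__main__.py. A minimal sketch of that wiring, assuming an app.run() helper that may not match the project's real layout:

import sys

from csv_metadata_quality import app  # assumed module name


def main():
    # The console script generated from [tool.poetry.scripts] calls this
    # function; `python -m csv_metadata_quality` reaches it as well.
    app.run(sys.argv)


if __name__ == "__main__":
    main()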
@@ -1,5 +1,5 @@
[pytest]
addopts= -rsxX -s -v --strict --capture=sys
addopts= -rsxX -s -v --strict-markers --capture=sys
filterwarnings =
    error::UserWarning
    ignore:.*U.* is deprecated:DeprecationWarning
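The only functional change in this hunk is --strict becoming --strict-markers: the former is a deprecated alias, and the latter still makes pytest error on any marker that is not registered in the config. A hedged illustration, using a hypothetical `integration` marker:

import pytest


# Under --strict-markers, this test errors at collection time unless
# `integration` is declared under `markers =` in pytest.ini.
@pytest.mark.integration
def test_example():
    assert True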
renovate.json (new file, 9 lines)
@@ -0,0 +1,9 @@
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
  "extends": [
    "config:base"
  ],
  "pip_requirements": {
    "enabled": false
  }
}
@@ -1,32 +1,82 @@
-i https://pypi.org/simple
atomicwrites==1.3.0
attrs==19.1.0
backcall==0.1.0
decorator==4.4.0
entrypoints==0.3
flake8==3.7.8
importlib-metadata==0.19
ipython-genutils==0.2.0
ipython==7.7.0
jedi==0.14.1
mccabe==0.6.1
more-itertools==7.2.0
packaging==19.1
parso==0.5.1
pexpect==4.7.0 ; sys_platform != 'win32'
pickleshare==0.7.5
pluggy==0.12.0
prompt-toolkit==2.0.9
ptyprocess==0.6.0
py==1.8.0
pycodestyle==2.5.0
pyflakes==2.1.1
pygments==2.4.2
pyparsing==2.4.2
pytest-clarity==0.2.0a1
pytest==5.0.1
six==1.12.0
termcolor==1.1.0
traitlets==4.3.2
wcwidth==0.1.7
zipp==0.5.2
agate-dbf==0.2.2 ; python_version >= "3.9" and python_version < "4.0"
agate-excel==0.2.5 ; python_version >= "3.9" and python_version < "4.0"
agate-sql==0.5.9 ; python_version >= "3.9" and python_version < "4.0"
agate==1.7.1 ; python_version >= "3.9" and python_version < "4.0"
appdirs==1.4.4 ; python_version >= "3.9" and python_version < "4.0"
appnope==0.1.3 ; python_version >= "3.9" and python_version < "4.0" and sys_platform == "darwin"
asttokens==2.2.1 ; python_version >= "3.9" and python_version < "4.0"
attrs==23.1.0 ; python_version >= "3.9" and python_version < "4.0"
babel==2.12.1 ; python_version >= "3.9" and python_version < "4.0"
backcall==0.2.0 ; python_version >= "3.9" and python_version < "4.0"
black==23.3.0 ; python_version >= "3.9" and python_version < "4.0"
cattrs==22.2.0 ; python_version >= "3.9" and python_version < "4.0"
certifi==2022.12.7 ; python_version >= "3.9" and python_version < "4.0"
charset-normalizer==3.1.0 ; python_version >= "3.9" and python_version < "4.0"
click==8.1.3 ; python_version >= "3.9" and python_version < "4.0"
colorama==0.4.6 ; python_version >= "3.9" and python_version < "4.0"
country-converter==1.0.0 ; python_version >= "3.9" and python_version < "4.0"
csvkit==1.1.1 ; python_version >= "3.9" and python_version < "4.0"
dbfread==2.0.7 ; python_version >= "3.9" and python_version < "4.0"
decorator==5.1.1 ; python_version >= "3.9" and python_version < "4.0"
et-xmlfile==1.1.0 ; python_version >= "3.9" and python_version < "4.0"
exceptiongroup==1.1.1 ; python_version >= "3.9" and python_version < "3.11"
executing==1.2.0 ; python_version >= "3.9" and python_version < "4.0"
flake8==6.0.0 ; python_version >= "3.9" and python_version < "4.0"
ftfy==6.1.1 ; python_version >= "3.9" and python_version < "4"
greenlet==2.0.2 ; python_version >= "3.9" and platform_machine == "aarch64" and python_version < "4.0" or python_version >= "3.9" and platform_machine == "ppc64le" and python_version < "4.0" or python_version >= "3.9" and platform_machine == "x86_64" and python_version < "4.0" or python_version >= "3.9" and platform_machine == "amd64" and python_version < "4.0" or python_version >= "3.9" and platform_machine == "AMD64" and python_version < "4.0" or python_version >= "3.9" and platform_machine == "win32" and python_version < "4.0" or python_version >= "3.9" and platform_machine == "WIN32" and python_version < "4.0"
idna==3.4 ; python_version >= "3.9" and python_version < "4.0"
iniconfig==2.0.0 ; python_version >= "3.9" and python_version < "4.0"
ipython==8.13.1 ; python_version >= "3.9" and python_version < "4.0"
isodate==0.6.1 ; python_version >= "3.9" and python_version < "4.0"
isort==5.12.0 ; python_version >= "3.9" and python_version < "4.0"
jedi==0.18.2 ; python_version >= "3.9" and python_version < "4.0"
langid==1.1.6 ; python_version >= "3.9" and python_version < "4.0"
leather==0.3.4 ; python_version >= "3.9" and python_version < "4.0"
markdown-it-py==2.2.0 ; python_version >= "3.9" and python_version < "4.0"
matplotlib-inline==0.1.6 ; python_version >= "3.9" and python_version < "4.0"
mccabe==0.7.0 ; python_version >= "3.9" and python_version < "4.0"
mdurl==0.1.2 ; python_version >= "3.9" and python_version < "4.0"
mypy-extensions==1.0.0 ; python_version >= "3.9" and python_version < "4.0"
numpy==1.24.3 ; python_version >= "3.9" and python_version < "4.0"
olefile==0.46 ; python_version >= "3.9" and python_version < "4.0"
openpyxl==3.1.2 ; python_version >= "3.9" and python_version < "4.0"
packaging==23.1 ; python_version >= "3.9" and python_version < "4.0"
pandas==2.0.1 ; python_version >= "3.9" and python_version < "4.0"
parsedatetime==2.6 ; python_version >= "3.9" and python_version < "4.0"
parso==0.8.3 ; python_version >= "3.9" and python_version < "4.0"
pathspec==0.11.1 ; python_version >= "3.9" and python_version < "4.0"
pexpect==4.8.0 ; python_version >= "3.9" and python_version < "4.0" and sys_platform != "win32"
pickleshare==0.7.5 ; python_version >= "3.9" and python_version < "4.0"
platformdirs==3.5.0 ; python_version >= "3.9" and python_version < "4.0"
pluggy==1.0.0 ; python_version >= "3.9" and python_version < "4.0"
pprintpp==0.4.0 ; python_version >= "3.9" and python_version < "4.0"
prompt-toolkit==3.0.38 ; python_version >= "3.9" and python_version < "4.0"
ptyprocess==0.7.0 ; python_version >= "3.9" and python_version < "4.0" and sys_platform != "win32"
pure-eval==0.2.2 ; python_version >= "3.9" and python_version < "4.0"
pyarrow==11.0.0 ; python_version >= "3.9" and python_version < "4.0"
pycodestyle==2.10.0 ; python_version >= "3.9" and python_version < "4.0"
pycountry @ git+https://github.com/alanorth/pycountry@iso-codes-4.13.0 ; python_version >= "3.9" and python_version < "4.0"
pyflakes==3.0.1 ; python_version >= "3.9" and python_version < "4.0"
pygments==2.15.1 ; python_version >= "3.9" and python_version < "4.0"
pytest-clarity==1.0.1 ; python_version >= "3.9" and python_version < "4.0"
pytest==7.3.1 ; python_version >= "3.9" and python_version < "4.0"
python-dateutil==2.8.2 ; python_version >= "3.9" and python_version < "4.0"
python-slugify==8.0.1 ; python_version >= "3.9" and python_version < "4.0"
python-stdnum==1.18 ; python_version >= "3.9" and python_version < "4.0"
pytimeparse==1.1.8 ; python_version >= "3.9" and python_version < "4.0"
pytz==2023.3 ; python_version >= "3.9" and python_version < "4.0"
requests-cache==0.9.8 ; python_version >= "3.9" and python_version < "4.0"
requests==2.29.0 ; python_version >= "3.9" and python_version < "4.0"
rich==13.3.5 ; python_version >= "3.9" and python_version < "4.0"
six==1.16.0 ; python_version >= "3.9" and python_version < "4.0"
sqlalchemy==1.4.48 ; python_version >= "3.9" and python_version < "4.0"
stack-data==0.6.2 ; python_version >= "3.9" and python_version < "4.0"
text-unidecode==1.3 ; python_version >= "3.9" and python_version < "4.0"
tomli==2.0.1 ; python_version >= "3.9" and python_version < "3.11"
traitlets==5.9.0 ; python_version >= "3.9" and python_version < "4.0"
typing-extensions==4.5.0 ; python_version >= "3.9" and python_version < "3.10"
tzdata==2023.3 ; python_version >= "3.9" and python_version < "4.0"
url-normalize==1.4.3 ; python_version >= "3.9" and python_version < "4.0"
urllib3==1.26.15 ; python_version >= "3.9" and python_version < "4.0"
wcwidth==0.2.6 ; python_version >= "3.9" and python_version < "4"
xlrd==2.0.1 ; python_version >= "3.9" and python_version < "4.0"
@@ -1,16 +1,25 @@
-i https://pypi.org/simple
-e .
certifi==2019.6.16
chardet==3.0.4
idna==2.8
numpy==1.17.0
pandas==0.25.0
pycountry==19.7.15
python-dateutil==2.8.0
python-stdnum==1.11
pytz==2019.2
requests-cache==0.5.0
requests==2.22.0
six==1.12.0
urllib3==1.25.3
xlrd==1.2.0
appdirs==1.4.4 ; python_version >= "3.9" and python_version < "4.0"
attrs==23.1.0 ; python_version >= "3.9" and python_version < "4.0"
cattrs==22.2.0 ; python_version >= "3.9" and python_version < "4.0"
certifi==2022.12.7 ; python_version >= "3.9" and python_version < "4.0"
charset-normalizer==3.1.0 ; python_version >= "3.9" and python_version < "4.0"
colorama==0.4.6 ; python_version >= "3.9" and python_version < "4.0"
country-converter==1.0.0 ; python_version >= "3.9" and python_version < "4.0"
exceptiongroup==1.1.1 ; python_version >= "3.9" and python_version < "3.11"
ftfy==6.1.1 ; python_version >= "3.9" and python_version < "4"
idna==3.4 ; python_version >= "3.9" and python_version < "4.0"
langid==1.1.6 ; python_version >= "3.9" and python_version < "4.0"
numpy==1.24.3 ; python_version >= "3.9" and python_version < "4.0"
pandas==2.0.1 ; python_version >= "3.9" and python_version < "4.0"
pyarrow==11.0.0 ; python_version >= "3.9" and python_version < "4.0"
pycountry @ git+https://github.com/alanorth/pycountry@iso-codes-4.13.0 ; python_version >= "3.9" and python_version < "4.0"
python-dateutil==2.8.2 ; python_version >= "3.9" and python_version < "4.0"
python-stdnum==1.18 ; python_version >= "3.9" and python_version < "4.0"
pytz==2023.3 ; python_version >= "3.9" and python_version < "4.0"
requests-cache==0.9.8 ; python_version >= "3.9" and python_version < "4.0"
requests==2.29.0 ; python_version >= "3.9" and python_version < "4.0"
six==1.16.0 ; python_version >= "3.9" and python_version < "4.0"
tzdata==2023.3 ; python_version >= "3.9" and python_version < "4.0"
url-normalize==1.4.3 ; python_version >= "3.9" and python_version < "4.0"
urllib3==1.26.15 ; python_version >= "3.9" and python_version < "4.0"
wcwidth==0.2.6 ; python_version >= "3.9" and python_version < "4"
setup.py (deleted, 38 lines)
@@ -1,38 +0,0 @@
import setuptools

with open("README.md", "r") as fh:
    long_description = fh.read()

install_requires = [
    'pandas',
    'python-stdnum',
    'requests',
    'requests-cache',
    'pycountry'
]

setuptools.setup(
    name="csv-metadata-quality",
    version="0.2.2",
    author="Alan Orth",
    author_email="aorth@mjanja.ch",
    description="A simple, but opinionated CSV quality checking and fixing pipeline for CSVs in the DSpace ecosystem.",
    license="GPLv3",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/alanorth/csv-metadata-quality",
    classifiers=[
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
        "Operating System :: OS Independent",
        "Development Status :: 4 - Beta"
    ],
    packages=['csv_metadata_quality'],
    entry_points={
        'console_scripts': [
            'csv-metadata-quality = csv_metadata_quality.__main__:main'
        ]
    },
    install_requires=install_requires
)
@@ -1,225 +1,514 @@
# SPDX-License-Identifier: GPL-3.0-only

import pandas as pd
from colorama import Fore

import csv_metadata_quality.check as check
import csv_metadata_quality.experimental as experimental


def test_check_invalid_issn(capsys):
    '''Test checking invalid ISSN.'''
    """Test checking invalid ISSN."""

    value = '2321-2302'
    value = "2321-2302"

    check.issn(value)

    captured = capsys.readouterr()
    assert captured.out == f'Invalid ISSN: {value}\n'
    assert captured.out == f"{Fore.RED}Invalid ISSN: {Fore.RESET}{value}\n"


def test_check_valid_issn():
    '''Test checking valid ISSN.'''
    """Test checking valid ISSN."""

    value = '0024-9319'
    value = "0024-9319"

    result = check.issn(value)

    assert result == value
    assert result is None


def test_check_invalid_isbn(capsys):
    '''Test checking invalid ISBN.'''
    """Test checking invalid ISBN."""

    value = '99921-58-10-6'
    value = "99921-58-10-6"

    check.isbn(value)

    captured = capsys.readouterr()
    assert captured.out == f'Invalid ISBN: {value}\n'
    assert captured.out == f"{Fore.RED}Invalid ISBN: {Fore.RESET}{value}\n"


def test_check_valid_isbn():
    '''Test checking valid ISBN.'''
    """Test checking valid ISBN."""

    value = '99921-58-10-7'
    value = "99921-58-10-7"

    result = check.isbn(value)

    assert result == value

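These ISSN and ISBN tests pin down the rewritten behavior: a check prints a colored message and returns None rather than echoing the value back. A plausible shape for such a check built on python-stdnum (a sketch under that assumption, not the project's literal code):

from colorama import Fore
from stdnum import issn as stdnum_issn


def issn(field):
    # Hypothetical sketch: validate each ||-separated value and print a
    # colored message for invalid ISSNs, returning None as the tests expect.
    if field is None:
        return

    for value in field.split("||"):
        if not stdnum_issn.is_valid(value):
            print(f"{Fore.RED}Invalid ISSN: {Fore.RESET}{value}")

    return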
def test_check_invalid_separators(capsys):
    '''Test checking invalid multi-value separators.'''

    value = 'Alan|Orth'

    check.separators(value)

    captured = capsys.readouterr()
    assert captured.out == f'Invalid multi-value separator: {value}\n'


def test_check_valid_separators():
    '''Test checking valid multi-value separators.'''

    value = 'Alan||Orth'

    result = check.separators(value)

    assert result == value
    assert result is None


def test_check_missing_date(capsys):
    '''Test checking missing date.'''
    """Test checking missing date."""

    value = None

    field_name = 'dc.date.issued'
    field_name = "dc.date.issued"

    check.date(value, field_name)

    captured = capsys.readouterr()
    assert captured.out == f'Missing date ({field_name}).\n'
    assert captured.out == f"{Fore.RED}Missing date ({field_name}).{Fore.RESET}\n"


def test_check_multiple_dates(capsys):
    '''Test checking multiple dates.'''
    """Test checking multiple dates."""

    value = '1990||1991'
    value = "1990||1991"

    field_name = 'dc.date.issued'
    field_name = "dc.date.issued"

    check.date(value, field_name)

    captured = capsys.readouterr()
    assert captured.out == f'Multiple dates not allowed ({field_name}): {value}\n'
    assert (
        captured.out
        == f"{Fore.RED}Multiple dates not allowed ({field_name}): {Fore.RESET}{value}\n"
    )


def test_check_invalid_date(capsys):
    '''Test checking invalid ISO8601 date.'''
    """Test checking invalid ISO8601 date."""

    value = '1990-0'
    value = "1990-0"

    field_name = 'dc.date.issued'
    field_name = "dc.date.issued"

    check.date(value, field_name)

    captured = capsys.readouterr()
    assert captured.out == f'Invalid date ({field_name}): {value}\n'
    assert (
        captured.out == f"{Fore.RED}Invalid date ({field_name}): {Fore.RESET}{value}\n"
    )


def test_check_valid_date():
    '''Test checking valid ISO8601 date.'''
    """Test checking valid ISO8601 date."""

    value = '1990'
    value = "1990"

    field_name = 'dc.date.issued'
    field_name = "dc.date.issued"

    result = check.date(value, field_name)

    assert result == value
    assert result is None


def test_check_suspicious_characters(capsys):
    '''Test checking for suspicious characters.'''
    """Test checking for suspicious characters."""

    value = 'foreˆt'
    value = "foreˆt"

    field_name = 'dc.contributor.author'
    field_name = "dc.contributor.author"

    check.suspicious_characters(value, field_name)

    captured = capsys.readouterr()
    assert captured.out == f'Suspicious character ({field_name}): ˆt\n'
    assert (
        captured.out
        == f"{Fore.YELLOW}Suspicious character ({field_name}): {Fore.RESET}ˆt\n"
    )


def test_check_valid_iso639_2_language():
    '''Test valid ISO 639-2 language.'''
def test_check_valid_iso639_1_language():
    """Test valid ISO 639-1 (alpha 2) language."""

    value = 'ja'
    value = "ja"

    result = check.language(value)

    assert result == value
    assert result is None


def test_check_valid_iso639_3_language():
    '''Test invalid ISO 639-3 language.'''
    """Test valid ISO 639-3 (alpha 3) language."""

    value = 'eng'
    value = "eng"

    result = check.language(value)

    assert result == value
    assert result is None


def test_check_invalid_iso639_2_language(capsys):
    '''Test invalid ISO 639-2 language.'''
def test_check_invalid_iso639_1_language(capsys):
    """Test invalid ISO 639-1 (alpha 2) language."""

    value = 'jp'
    value = "jp"

    check.language(value)

    captured = capsys.readouterr()
    assert captured.out == f'Invalid ISO 639-2 language: {value}\n'
    assert (
        captured.out == f"{Fore.RED}Invalid ISO 639-1 language: {Fore.RESET}{value}\n"
    )


def test_check_invalid_iso639_3_language(capsys):
    '''Test invalid ISO 639-3 language.'''
    """Test invalid ISO 639-3 (alpha 3) language."""

    value = 'chi'
    value = "chi"

    check.language(value)

    captured = capsys.readouterr()
    assert captured.out == f'Invalid ISO 639-3 language: {value}\n'
    assert (
        captured.out == f"{Fore.RED}Invalid ISO 639-3 language: {Fore.RESET}{value}\n"
    )


def test_check_invalid_language(capsys):
    '''Test invalid language.'''
    """Test invalid language."""

    value = 'Span'
    value = "Span"

    check.language(value)

    captured = capsys.readouterr()
    assert captured.out == f'Invalid language: {value}\n'
    assert captured.out == f"{Fore.RED}Invalid language: {Fore.RESET}{value}\n"


def test_check_invalid_agrovoc(capsys):
    '''Test invalid AGROVOC subject.'''
    """Test invalid AGROVOC subject. Invalid values *will not* be dropped."""

    value = 'FOREST'
    field_name = 'dc.subject'
    valid_agrovoc = "LIVESTOCK"
    invalid_agrovoc = "FOREST"
    value = f"{valid_agrovoc}||{invalid_agrovoc}"
    field_name = "dcterms.subject"
    drop = False

    check.agrovoc(value, field_name)
    new_value = check.agrovoc(value, field_name, drop)

    captured = capsys.readouterr()
    assert captured.out == f'Invalid AGROVOC ({field_name}): {value}\n'
    assert (
        captured.out
        == f"{Fore.RED}Invalid AGROVOC ({field_name}): {Fore.RESET}{invalid_agrovoc}\n"
    )
    assert new_value == value


def test_check_invalid_agrovoc_dropped(capsys):
    """Test invalid AGROVOC subjects. Invalid values *will* be dropped."""

    valid_agrovoc = "LIVESTOCK"
    invalid_agrovoc = "FOREST"
    value = f"{valid_agrovoc}||{invalid_agrovoc}"
    field_name = "dcterms.subject"
    drop = True

    new_value = check.agrovoc(value, field_name, drop)

    captured = capsys.readouterr()
    assert (
        captured.out
        == f"{Fore.GREEN}Dropping invalid AGROVOC ({field_name}): {Fore.RESET}{invalid_agrovoc}\n"
    )
    assert new_value == valid_agrovoc

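The two AGROVOC tests fix the new signature, check.agrovoc(value, field_name, drop): every ||-separated subject is validated against the AGROVOC thesaurus, and invalid ones are either reported or dropped. A sketch of that control flow, with subject_exists() standing in for the real REST lookup (whose endpoint is not shown here):

from colorama import Fore


def subject_exists(subject):
    # Stand-in for the cached AGROVOC REST query; always True so the
    # sketch runs on its own.
    return True


def agrovoc(field, field_name, drop):
    # Hypothetical sketch: keep valid subjects; report or drop invalid ones.
    values = []
    for value in field.split("||"):
        if subject_exists(value):
            values.append(value)
        elif drop:
            print(f"{Fore.GREEN}Dropping invalid AGROVOC ({field_name}): {Fore.RESET}{value}")
        else:
            print(f"{Fore.RED}Invalid AGROVOC ({field_name}): {Fore.RESET}{value}")
            values.append(value)  # invalid values are kept when drop is False

    return "||".join(values)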
def test_check_valid_agrovoc():
    '''Test valid AGROVOC subject.'''
    """Test valid AGROVOC subject."""

    value = 'FORESTS'
    field_name = 'dc.subject'
    value = "FORESTS"
    field_name = "dcterms.subject"
    drop = False

    result = check.agrovoc(value, field_name)
    result = check.agrovoc(value, field_name, drop)

    assert result == value
    assert result == "FORESTS"


def test_check_uncommon_filename_extension(capsys):
    '''Test uncommon filename extension.'''
    """Test uncommon filename extension."""

    value = 'file.pdf.lck'
    value = "file.pdf.lck"

    check.filename_extension(value)

    captured = capsys.readouterr()
    assert captured.out == f'Filename with uncommon extension: {value}\n'
    assert (
        captured.out
        == f"{Fore.YELLOW}Filename with uncommon extension: {Fore.RESET}{value}\n"
    )


def test_check_common_filename_extension():
    '''Test common filename extension.'''
    """Test common filename extension."""

    value = 'file.pdf'
    value = "file.pdf"

    result = check.filename_extension(value)

    assert result == value
    assert result is None


def test_check_incorrect_iso_639_1_language(capsys):
    """Test incorrect ISO 639-1 language, as determined by comparing the item's language field with the actual language predicted in the item's title."""

    title = "A randomised vaccine field trial in Kenya demonstrates protection against wildebeest-associated malignant catarrhal fever in cattle"
    language = "es"
    exclude = []

    # Create a dictionary to mimic Pandas series
    row = {"dc.title": title, "dc.language.iso": language}
    series = pd.Series(row)

    experimental.correct_language(series, exclude)

    captured = capsys.readouterr()
    assert (
        captured.out
        == f"{Fore.YELLOW}Possibly incorrect language {language} (detected en): {Fore.RESET}{title}\n"
    )


def test_check_incorrect_iso_639_3_language(capsys):
    """Test incorrect ISO 639-3 language, as determined by comparing the item's language field with the actual language predicted in the item's title."""

    title = "A randomised vaccine field trial in Kenya demonstrates protection against wildebeest-associated malignant catarrhal fever in cattle"
    language = "spa"
    exclude = []

    # Create a dictionary to mimic Pandas series
    row = {"dc.title": title, "dc.language.iso": language}
    series = pd.Series(row)

    experimental.correct_language(series, exclude)

    captured = capsys.readouterr()
    assert (
        captured.out
        == f"{Fore.YELLOW}Possibly incorrect language {language} (detected eng): {Fore.RESET}{title}\n"
    )


def test_check_correct_iso_639_1_language():
    """Test correct ISO 639-1 language, as determined by comparing the item's language field with the actual language predicted in the item's title."""

    title = "A randomised vaccine field trial in Kenya demonstrates protection against wildebeest-associated malignant catarrhal fever in cattle"
    language = "en"
    exclude = []

    # Create a dictionary to mimic Pandas series
    row = {"dc.title": title, "dc.language.iso": language}
    series = pd.Series(row)

    result = experimental.correct_language(series, exclude)

    assert result is None


def test_check_correct_iso_639_3_language():
    """Test correct ISO 639-3 language, as determined by comparing the item's language field with the actual language predicted in the item's title."""

    title = "A randomised vaccine field trial in Kenya demonstrates protection against wildebeest-associated malignant catarrhal fever in cattle"
    language = "eng"
    exclude = []

    # Create a dictionary to mimic Pandas series
    row = {"dc.title": title, "dc.language.iso": language}
    series = pd.Series(row)

    result = experimental.correct_language(series, exclude)

    assert result is None

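The experimental.correct_language() tests compare an item's declared language with the language predicted from its title; the py3langid dependency in pyproject.toml does the predicting. A minimal sketch of that step (the real code's thresholds and field handling are richer):

import py3langid as langid

title = "A randomised vaccine field trial in Kenya demonstrates protection against wildebeest-associated malignant catarrhal fever in cattle"
declared = "es"

# classify() returns a (language, score) tuple; here it detects "en",
# which does not match the declared "es", so the item gets flagged.
predicted, score = langid.classify(title)
if predicted != declared:
    print(f"Possibly incorrect language {declared} (detected {predicted})")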
def test_check_valid_spdx_license_identifier():
    """Test valid SPDX license identifier."""

    license = "CC-BY-SA-4.0"

    result = check.spdx_license_identifier(license)

    assert result is None


def test_check_invalid_spdx_license_identifier(capsys):
    """Test invalid SPDX license identifier."""

    license = "CC-BY-SA"

    check.spdx_license_identifier(license)

    captured = capsys.readouterr()
    assert (
        captured.out
        == f"{Fore.YELLOW}Non-SPDX license identifier: {Fore.RESET}{license}\n"
    )


def test_check_duplicate_item(capsys):
    """Test item with duplicate title, type, and date."""

    item_title = "Title"
    item_type = "Report"
    item_date = "2021-03-17"

    d = {
        "dc.title": [item_title, item_title],
        "dcterms.type": [item_type, item_type],
        "dcterms.issued": [item_date, item_date],
    }
    df = pd.DataFrame(data=d)

    check.duplicate_items(df)

    captured = capsys.readouterr()
    assert (
        captured.out
        == f"{Fore.YELLOW}Possible duplicate (dc.title): {Fore.RESET}{item_title}\n"
    )


def test_check_no_mojibake():
    """Test string with no mojibake."""

    field = "CIAT Publicaçao"
    field_name = "dcterms.isPartOf"

    result = check.mojibake(field, field_name)

    assert result is None


def test_check_mojibake(capsys):
    """Test string with mojibake."""

    field = "CIAT Publicaçao"
    field_name = "dcterms.isPartOf"

    check.mojibake(field, field_name)

    captured = capsys.readouterr()
    assert (
        captured.out
        == f"{Fore.YELLOW}Possible encoding issue ({field_name}): {Fore.RESET}{field}\n"
    )

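check.mojibake() targets strings like "CIAT Publicaçao", where UTF-8 bytes were decoded as the wrong charset. The ftfy dependency handles both detection and repair; a hedged sketch (ftfy.badness.is_bad() is the 6.x heuristic and may not be exactly what the check uses):

import ftfy
from ftfy.badness import is_bad

field = "CIAT Publicaçao"
if is_bad(field):
    # ftfy can usually reverse the mis-decoding: this prints "CIAT Publicaçao".
    print(ftfy.fix_text(field))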
def test_check_doi_field():
    """Test an item with a DOI field."""

    doi = "https://doi.org/10.1186/1743-422X-9-218"
    citation = "Orth, A. 2021. Testing all the things. doi: 10.1186/1743-422X-9-218"

    # Emulate a column in a transposed dataframe (which is just a series), with
    # the citation and a DOI field.
    d = {"cg.identifier.doi": doi, "dcterms.bibliographicCitation": citation}
    series = pd.Series(data=d)
    exclude = []

    result = check.citation_doi(series, exclude)

    assert result is None


def test_check_doi_only_in_citation(capsys):
    """Test an item with a DOI in its citation, but no DOI field."""

    citation = "Orth, A. 2021. Testing all the things. doi: 10.1186/1743-422X-9-218"
    exclude = []

    # Emulate a column in a transposed dataframe (which is just a series), with
    # an empty DOI field and a citation containing a DOI.
    d = {"cg.identifier.doi": None, "dcterms.bibliographicCitation": citation}
    series = pd.Series(data=d)

    check.citation_doi(series, exclude)

    captured = capsys.readouterr()
    assert (
        captured.out
        == f"{Fore.YELLOW}DOI in citation, but missing a DOI field: {Fore.RESET}{citation}\n"
    )


def test_title_in_citation():
    """Test an item with its title in the citation."""

    title = "Testing all the things"
    citation = "Orth, A. 2021. Testing all the things."
    exclude = []

    # Emulate a column in a transposed dataframe (which is just a series), with
    # the title and citation.
    d = {"dc.title": title, "dcterms.bibliographicCitation": citation}
    series = pd.Series(data=d)

    result = check.title_in_citation(series, exclude)

    assert result is None


def test_title_not_in_citation(capsys):
    """Test an item with its title missing from the citation."""

    title = "Testing all the things"
    citation = "Orth, A. 2021. Testing all teh things."
    exclude = []

    # Emulate a column in a transposed dataframe (which is just a series), with
    # the title and citation.
    d = {"dc.title": title, "dcterms.bibliographicCitation": citation}
    series = pd.Series(data=d)

    check.title_in_citation(series, exclude)

    captured = capsys.readouterr()
    assert (
        captured.out
        == f"{Fore.YELLOW}Title is not present in citation: {Fore.RESET}{title}\n"
    )


def test_country_matches_region():
    """Test an item with regions matching its country list."""

    country = "Kenya"
    region = "Eastern Africa"
    exclude = []

    # Emulate a column in a transposed dataframe (which is just a series)
    d = {"cg.coverage.country": country, "cg.coverage.region": region}
    series = pd.Series(data=d)

    result = check.countries_match_regions(series, exclude)

    assert result is None


def test_country_not_matching_region(capsys):
    """Test an item with regions not matching its country list."""

    title = "Testing an item with no matching region."
    country = "Kenya"
    region = ""
    missing_region = "Eastern Africa"
    exclude = []

    # Emulate a column in a transposed dataframe (which is just a series)
    d = {
        "dc.title": title,
        "cg.coverage.country": country,
        "cg.coverage.region": region,
    }
    series = pd.Series(data=d)

    check.countries_match_regions(series, exclude)

    captured = capsys.readouterr()
    assert (
        captured.out
        == f"{Fore.YELLOW}Missing region ({country} → {missing_region}): {Fore.RESET}{title}\n"
    )

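The country/region tests depend on mapping a country to its UN region. The country-converter (coco) dependency provides exactly that lookup; a sketch of the mapping the check presumably builds on:

import country_converter as coco

# "Kenya" maps to the UN region "Eastern Africa", the value the tests
# above expect to be reported when it is missing.
region = coco.convert(names="Kenya", to="UNregion")
print(region)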
@@ -1,68 +1,162 @@
# SPDX-License-Identifier: GPL-3.0-only

import pandas as pd

import csv_metadata_quality.fix as fix


def test_fix_leading_whitespace():
    '''Test fixing leading whitespace.'''
    """Test fixing leading whitespace."""

    value = ' Alan'
    value = " Alan"

    assert fix.whitespace(value) == 'Alan'
    field_name = "dc.contributor.author"

    assert fix.whitespace(value, field_name) == "Alan"


def test_fix_trailing_whitespace():
    '''Test fixing trailing whitespace.'''
    """Test fixing trailing whitespace."""

    value = 'Alan '
    value = "Alan "

    assert fix.whitespace(value) == 'Alan'
    field_name = "dc.contributor.author"

    assert fix.whitespace(value, field_name) == "Alan"


def test_fix_excessive_whitespace():
    '''Test fixing excessive whitespace.'''
    """Test fixing excessive whitespace."""

    value = 'Alan Orth'
    value = "Alan Orth"

    assert fix.whitespace(value) == 'Alan Orth'
    field_name = "dc.contributor.author"

    assert fix.whitespace(value, field_name) == "Alan Orth"


def test_fix_invalid_separators():
    '''Test fixing invalid multi-value separators.'''
    """Test fixing invalid multi-value separators."""

    value = 'Alan|Orth'
    value = "Alan|Orth"

    assert fix.separators(value) == 'Alan||Orth'
    field_name = "dc.contributor.author"

    assert fix.separators(value, field_name) == "Alan||Orth"


def test_fix_unnecessary_separators():
    """Test fixing unnecessary multi-value separators."""

    field = "Alan||Orth||"

    field_name = "dc.contributor.author"

    assert fix.separators(field, field_name) == "Alan||Orth"

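The separators tests now cover both repairs: a single "|" upgraded to "||", and empty values left by unnecessary trailing separators dropped. A regex sketch consistent with both assertions (not the project's literal implementation; in the real code field_name is used for reporting):

import re


def separators(field, field_name):
    # Hypothetical sketch: split on one or two pipes, drop empty values,
    # and rejoin on the standard "||" separator.
    values = [v for v in re.split(r"\|\|?", field) if v != ""]
    return "||".join(values)


assert separators("Alan|Orth", "dc.contributor.author") == "Alan||Orth"
assert separators("Alan||Orth||", "dc.contributor.author") == "Alan||Orth"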
def test_fix_unnecessary_unicode():
    '''Test fixing unnecessary Unicode.'''
    """Test fixing unnecessary Unicode."""

    value = 'Alan Orth'
    value = "Alan Orth"

    assert fix.unnecessary_unicode(value) == 'Alan Orth'
    assert fix.unnecessary_unicode(value) == "Alan Orth"


def test_fix_duplicates():
    '''Test fixing duplicate metadata values.'''
    """Test fixing duplicate metadata values."""

    value = 'Kenya||Kenya'
    value = "Kenya||Kenya"

    assert fix.duplicates(value) == 'Kenya'
    field_name = "dc.contributor.author"

    assert fix.duplicates(value, field_name) == "Kenya"


def test_fix_newlines():
    '''Test fixing newlines.'''
    """Test fixing newlines."""

    value = '''Ken
ya'''
    value = """Ken
ya"""
    field_name = "dcterms.subject"

    assert fix.newlines(value) == 'Kenya'
    assert fix.newlines(value, field_name) == "Kenya"


def test_fix_comma_space():
    '''Test adding space after comma.'''
    """Test adding space after comma."""

    value = 'Orth,Alan S.'
    value = "Orth,Alan S."

    field_name = 'dc.contributor.author'
    field_name = "dc.contributor.author"

    assert fix.comma_space(value, field_name) == 'Orth, Alan S.'
    assert fix.comma_space(value, field_name) == "Orth, Alan S."


def test_fix_normalized_unicode():
    """Test fixing a string that is already in its normalized (NFC) Unicode form."""

    # string using the normalized canonical form of é
    value = "Ouédraogo, Mathieu"

    field_name = "dc.contributor.author"

    assert fix.normalize_unicode(value, field_name) == "Ouédraogo, Mathieu"


def test_fix_decomposed_unicode():
    """Test fixing a string that contains Unicode string."""

    # string using the decomposed form of é
    value = "Ouédraogo, Mathieu"

    field_name = "dc.contributor.author"

    assert fix.normalize_unicode(value, field_name) == "Ouédraogo, Mathieu"

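The two normalize_unicode tests use strings that render identically: the first "é" is precomposed (NFC) and the second is an "e" followed by a combining acute accent (NFD). The fix is standard-library territory; a sketch of the core step:

import unicodedata

decomposed = "Oue\u0301draogo, Mathieu"  # "e" + U+0301 COMBINING ACUTE ACCENT
if not unicodedata.is_normalized("NFC", decomposed):
    # Convert to the canonical composed form, "Ouédraogo, Mathieu".
    print(unicodedata.normalize("NFC", decomposed))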
def test_fix_mojibake():
    """Test string with no mojibake."""

    field = "CIAT Publicaçao"
    field_name = "dcterms.isPartOf"

    assert fix.mojibake(field, field_name) == "CIAT Publicaçao"


def test_fix_country_not_matching_region():
    """Test an item with regions not matching its country list."""

    title = "Testing an item with no matching region."
    country = "Kenya"
    region = ""
    missing_region = "Eastern Africa"
    exclude = []

    # Emulate a column in a transposed dataframe (which is just a series)
    d = {
        "dc.title": title,
        "cg.coverage.country": country,
        "cg.coverage.region": region,
    }
    series = pd.Series(data=d)

    result = fix.countries_match_regions(series, exclude)

    # Emulate the correct series we are expecting
    d_correct = {
        "dc.title": title,
        "cg.coverage.country": country,
        "cg.coverage.region": missing_region,
    }
    series_correct = pd.Series(data=d_correct)

    pd.testing.assert_series_equal(result, series_correct)


def test_fix_normalize_dois():
    """Test normalizing a DOI."""

    value = "doi: 10.11648/j.jps.20140201.14"

    assert fix.normalize_dois(value) == "https://doi.org/10.11648/j.jps.20140201.14"
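fix.normalize_dois() has to cope with the messy DOI forms seeded in the test CSV earlier: "doi: 10...." prefixes, http://dx.doi.org/ URLs, and bare identifiers. A regex sketch that satisfies this test's assertion (a plausible approach, not necessarily the project's exact pattern):

import re


def normalize_dois(value):
    # Hypothetical sketch: extract the bare 10.xxxx/... identifier and
    # rebuild it on the canonical https://doi.org/ prefix.
    match = re.search(r"10\.\d{4,9}/\S+", value)
    if match:
        return f"https://doi.org/{match.group(0)}"
    return value


assert normalize_dois("doi: 10.11648/j.jps.20140201.14") == "https://doi.org/10.11648/j.jps.20140201.14"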