<!-- mirror of https://github.com/alanorth/cgspace-notes.git, synced 2024-12-22 21:22:19 +01:00 -->
<!DOCTYPE html>
|
|
<html lang="en">
|
|
|
|
<head>
|
|
<meta charset="utf-8">
|
|
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
|
|
|
|
|
|
<meta property="og:title" content="October, 2021" />
|
|
<meta property="og:description" content="2021-10-01
|
|
|
|
Export all affiliations on CGSpace and run them against the latest RoR data dump:
|
|
|
|
localhost/dspace63= &gt; \COPY (SELECT DISTINCT text_value as &quot;cg.contributor.affiliation&quot;, count(*) FROM metadatavalue WHERE dspace_object_id IN (SELECT uuid FROM item) AND metadata_field_id = 211 GROUP BY text_value ORDER BY count DESC) to /tmp/2021-10-01-affiliations.csv WITH CSV HEADER;
|
|
$ csvcut -c 1 /tmp/2021-10-01-affiliations.csv | sed 1d > /tmp/2021-10-01-affiliations.txt
|
|
$ ./ilri/ror-lookup.py -i /tmp/2021-10-01-affiliations.txt -r 2021-09-23-ror-data.json -o /tmp/2021-10-01-affili
|
|
ations-matching.csv
|
|
$ csvgrep -c matched -m true /tmp/2021-10-01-affiliations-matching.csv | sed 1d | wc -l
|
|
1879
|
|
$ wc -l /tmp/2021-10-01-affiliations.txt
|
|
7100 /tmp/2021-10-01-affiliations.txt
|
|
|
|
So we have 1879/7100 (26.46%) matching already
|
|
" />
|
|
<meta property="og:type" content="article" />
|
|
<meta property="og:url" content="https://alanorth.github.io/cgspace-notes/2021-10/" />
|
|
<meta property="article:published_time" content="2021-10-01T11:14:07+03:00" />
|
|
<meta property="article:modified_time" content="2021-10-09T22:00:59+03:00" />
|
|
|
|
|
|
|
|
<meta name="twitter:card" content="summary"/>
|
|
<meta name="twitter:title" content="October, 2021"/>
|
|
<meta name="twitter:description" content="2021-10-01
|
|
|
|
Export all affiliations on CGSpace and run them against the latest RoR data dump:
|
|
|
|
localhost/dspace63= &gt; \COPY (SELECT DISTINCT text_value as &quot;cg.contributor.affiliation&quot;, count(*) FROM metadatavalue WHERE dspace_object_id IN (SELECT uuid FROM item) AND metadata_field_id = 211 GROUP BY text_value ORDER BY count DESC) to /tmp/2021-10-01-affiliations.csv WITH CSV HEADER;
|
|
$ csvcut -c 1 /tmp/2021-10-01-affiliations.csv | sed 1d > /tmp/2021-10-01-affiliations.txt
|
|
$ ./ilri/ror-lookup.py -i /tmp/2021-10-01-affiliations.txt -r 2021-09-23-ror-data.json -o /tmp/2021-10-01-affili
|
|
ations-matching.csv
|
|
$ csvgrep -c matched -m true /tmp/2021-10-01-affiliations-matching.csv | sed 1d | wc -l
|
|
1879
|
|
$ wc -l /tmp/2021-10-01-affiliations.txt
|
|
7100 /tmp/2021-10-01-affiliations.txt
|
|
|
|
So we have 1879/7100 (26.46%) matching already
|
|
"/>
|
|
<meta name="generator" content="Hugo 0.88.1" />
|
|
|
|
|
|
|
|
<script type="application/ld+json">
|
|
{
|
|
"@context": "http://schema.org",
|
|
"@type": "BlogPosting",
|
|
"headline": "October, 2021",
|
|
"url": "https://alanorth.github.io/cgspace-notes/2021-10/",
|
|
"wordCount": "2199",
|
|
"datePublished": "2021-10-01T11:14:07+03:00",
|
|
"dateModified": "2021-10-09T22:00:59+03:00",
|
|
"author": {
|
|
"@type": "Person",
|
|
"name": "Alan Orth"
|
|
},
|
|
"keywords": "Notes"
|
|
}
|
|
</script>
|
|
|
|
|
|
|
|
<link rel="canonical" href="https://alanorth.github.io/cgspace-notes/2021-10/">
|
|
|
|
<title>October, 2021 | CGSpace Notes</title>
|
|
|
|
|
|
<!-- combined, minified CSS -->
|
|
|
|
<link href="https://alanorth.github.io/cgspace-notes/css/style.beb8012edc08ba10be012f079d618dc243812267efe62e11f22fe49618f976a4.css" rel="stylesheet" integrity="sha256-vrgBLtwIuhC+AS8HnWGNwkOBImfv5i4R8i/klhj5dqQ=" crossorigin="anonymous">
|
|
|
|
|
|
<!-- minified Font Awesome for SVG icons -->
|
|
|
|
<script defer src="https://alanorth.github.io/cgspace-notes/js/fontawesome.min.f5072c55a0721857184db93a50561d7dc13975b4de2e19db7f81eb5f3fa57270.js" integrity="sha256-9QcsVaByGFcYTbk6UFYdfcE5dbTeLhnbf4HrXz+lcnA=" crossorigin="anonymous"></script>
|
|
|
|
<!-- RSS 2.0 feed -->
|
|
|
|
|
|
|
|
|
|
</head>
|
|
|
|
<body>
|
|
|
|
|
|
<div class="blog-masthead">
|
|
<div class="container">
|
|
<nav class="nav blog-nav">
|
|
<a class="nav-link " href="https://alanorth.github.io/cgspace-notes/">Home</a>
|
|
</nav>
|
|
</div>
|
|
</div>
|
|
|
|
|
|
|
|
|
|
<header class="blog-header">
|
|
<div class="container">
|
|
<h1 class="blog-title" dir="auto"><a href="https://alanorth.github.io/cgspace-notes/" rel="home">CGSpace Notes</a></h1>
|
|
<p class="lead blog-description" dir="auto">Documenting day-to-day work on the <a href="https://cgspace.cgiar.org">CGSpace</a> repository.</p>
|
|
</div>
|
|
</header>
|
|
|
|
|
|
|
|
|
|
<div class="container">
|
|
<div class="row">
|
|
<div class="col-sm-8 blog-main">
|
|
|
|
|
|
|
|
|
|
<article class="blog-post">
|
|
<header>
|
|
<h2 class="blog-post-title" dir="auto"><a href="https://alanorth.github.io/cgspace-notes/2021-10/">October, 2021</a></h2>
|
|
<p class="blog-post-meta">
|
|
<time datetime="2021-10-01T11:14:07+03:00">Fri Oct 01, 2021</time>
|
|
in
|
|
<span class="fas fa-folder" aria-hidden="true"></span> <a href="/cgspace-notes/categories/notes/" rel="category tag">Notes</a>
|
|
|
|
|
|
</p>
|
|
</header>
|
|
<h2 id="2021-10-01">2021-10-01</h2>
|
|
<ul>
|
|
<li>Export all affiliations on CGSpace and run them against the latest RoR data dump:</li>
|
|
</ul>
|
|
<pre tabindex="0"><code class="language-console" data-lang="console">localhost/dspace63= > \COPY (SELECT DISTINCT text_value as "cg.contributor.affiliation", count(*) FROM metadatavalue WHERE dspace_object_id IN (SELECT uuid FROM item) AND metadata_field_id = 211 GROUP BY text_value ORDER BY count DESC) to /tmp/2021-10-01-affiliations.csv WITH CSV HEADER;
|
|
$ csvcut -c 1 /tmp/2021-10-01-affiliations.csv | sed 1d > /tmp/2021-10-01-affiliations.txt
|
|
$ ./ilri/ror-lookup.py -i /tmp/2021-10-01-affiliations.txt -r 2021-09-23-ror-data.json -o /tmp/2021-10-01-affili
|
|
ations-matching.csv
|
|
$ csvgrep -c matched -m true /tmp/2021-10-01-affiliations-matching.csv | sed 1d | wc -l
|
|
1879
|
|
$ wc -l /tmp/2021-10-01-affiliations.txt
|
|
7100 /tmp/2021-10-01-affiliations.txt
|
|
</code></pre><ul>
|
|
<li>So we have 1879/7100 (26.46%) matching already</li>
|
|
</ul>
|
|
<h2 id="2021-10-03">2021-10-03</h2>
|
|
<ul>
|
|
<li>Dominique from IWMI asked me for information about how CGSpace partners are using CGSpace APIs to feed their websites</li>
|
|
<li>Start a fresh indexing on AReS</li>
|
|
<li>Udana sent me his file of 292 non-IWMI publications for the Virtual library on water management
|
|
<ul>
|
|
<li>He added licenses</li>
|
|
<li>I want to clean up the <code>dcterms.extent</code> field though because it has volume, issue, and pages there</li>
|
|
<li>I cloned the column several times and extracted values based on their positions, for example:
|
|
<ul>
|
|
<li>Volume: <code>value.partition(":")[0]</code></li>
|
|
<li>Issue: <code>value.partition("(")[2].partition(")")[0]</code></li>
|
|
<li>Page: <code>"p. " + value.replace(".", "")</code></li>
|
|
</ul>
|
|
</li>
|
|
</ul>
|
|
</li>
|
|
</ul>
|
|
<h2 id="2021-10-04">2021-10-04</h2>
|
|
<ul>
|
|
<li>Start looking at the last month of Solr statistics on CGSpace
|
|
<ul>
|
|
<li>I see a number of IPs with “normal” user agents who clearly behave like bots
|
|
<ul>
|
|
<li>198.15.130.18: 21,000 requests to /discover with a normal-looking user agent, from ASN 11282 (SERVERYOU, US)</li>
|
|
<li>93.158.90.107: 8,500 requests to handle and browse links with a Firefox 84.0 user agent, from ASN 12552 (IPO-EU, SE)</li>
|
|
<li>193.235.141.162: 4,800 requests to handle, browse, and discovery links with a Firefox 84.0 user agent, from ASN 51747 (INTERNETBOLAGET, SE)</li>
|
|
<li>3.225.28.105: 2,900 requests to REST API for the CIAT Story Maps collection with a normal user agent, from ASN 14618 (AMAZON-AES, US)</li>
|
|
<li>34.228.236.6: 2,800 requests to discovery for the CGIAR System community with user agent <code>Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)</code>, from ASN 14618 (AMAZON-AES, US)</li>
|
|
<li>18.212.137.2: 2,800 requests to discovery for the CGIAR System community with user agent <code>Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)</code>, from ASN 14618 (AMAZON-AES, US)</li>
|
|
<li>3.81.123.72: 2,800 requests to discovery and handles for the CGIAR System community with user agent <code>Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)</code>, from ASN 14618 (AMAZON-AES, US)</li>
|
|
<li>3.227.16.188: 2,800 requests to discovery and handles for the CGIAR System community with user agent <code>Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)</code>, from ASN 14618 (AMAZON-AES, US)</li>
|
|
</ul>
|
|
</li>
|
|
<li>Looking closer into the requests with this Mozilla/4.0 user agent, I see 500+ IPs using it:</li>
|
|
</ul>
|
|
</li>
|
|
</ul>
|
|
<pre tabindex="0"><code class="language-console" data-lang="console"># zcat --force /var/log/nginx/*.log* | grep 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)' | awk '{print $1}' | sort | uniq > /tmp/mozilla-4.0-ips.txt
|
|
# wc -l /tmp/mozilla-4.0-ips.txt
|
|
543 /tmp/mozilla-4.0-ips.txt
|
|
</code></pre><ul>
|
|
<li>Then I resolved the IPs and extracted the ones belonging to Amazon:</li>
|
|
</ul>
|
|
<pre tabindex="0"><code class="language-console" data-lang="console">$ ./ilri/resolve-addresses-geoip2.py -i /tmp/mozilla-4.0-ips.txt -k "$ABUSEIPDB_API_KEY" -o /tmp/mozilla-4.0-ips.csv
|
|
$ csvgrep -c asn -m 14618 /tmp/mozilla-4.0-ips.csv | csvcut -c ip | sed 1d | tee /tmp/amazon-ips.txt | wc -l
|
|
</code></pre><ul>
|
|
<li>I am thinking I will purge them all, as I have several indicators that they are bots: mysterious user agent, IP owned by Amazon</li>
|
|
<li>Even more interesting, these requests are weighted VERY heavily on the CGIAR System community:</li>
|
|
</ul>
|
|
<pre tabindex="0"><code class="language-console" data-lang="console"> 1592 GET /handle/10947/2526
|
|
1592 GET /handle/10947/2527
|
|
1592 GET /handle/10947/34
|
|
1593 GET /handle/10947/6
|
|
1594 GET /handle/10947/1
|
|
1598 GET /handle/10947/2515
|
|
1598 GET /handle/10947/2516
|
|
1599 GET /handle/10568/101335
|
|
1599 GET /handle/10568/91688
|
|
1599 GET /handle/10947/2517
|
|
1599 GET /handle/10947/2518
|
|
1599 GET /handle/10947/2519
|
|
1599 GET /handle/10947/2708
|
|
1599 GET /handle/10947/2871
|
|
1600 GET /handle/10568/89342
|
|
1600 GET /handle/10947/4467
|
|
1607 GET /handle/10568/103816
|
|
290382 GET /handle/10568/83389
|
|
</code></pre><ul>
|
|
<li>Before I purge all those I will ask Samuel Stacey from the System Office to hopefully get an insight…</li>
|
|
<li>Meeting with Michael Victor, Peter, Jane, and Abenet about the future of repositories in the One CGIAR</li>
|
|
<li>Meeting with Michelle from Altmetric about their new CSV upload system
|
|
<ul>
|
|
<li>I sent her some examples of Handles that have DOIs, but no linked score (yet) to see if an association will be created when she uploads them</li>
|
|
</ul>
|
|
</li>
|
|
</ul>
|
|
<pre tabindex="0"><code class="language-csv" data-lang="csv">doi,handle
|
|
10.1016/j.agsy.2021.103263,10568/115288
|
|
10.3389/fgene.2021.723360,10568/115287
|
|
10.3389/fpls.2021.720670,10568/115285
|
|
</code></pre><ul>
|
|
<li>Extract the AGROVOC subjects from IWMI’s 292 publications to validate them against AGROVOC:</li>
|
|
</ul>
|
|
<pre tabindex="0"><code class="language-console" data-lang="console">$ csvcut -c 'dcterms.subject[en_US]' ~/Downloads/2021-10-03-non-IWMI-publications.csv | sed -e 1d -e 's/||/\n/g' -e 's/"//g' | sort -u > /tmp/agrovoc.txt
|
|
$ ./ilri/agrovoc-lookup.py -i /tmp/agrovoc-sorted.txt -o /tmp/agrovoc-matches.csv
|
|
$ csvgrep -c 'number of matches' -m '0' /tmp/agrovoc-matches.csv | csvcut -c 1 > /tmp/invalid-agrovoc.csv
|
|
</code></pre><h2 id="2021-10-05">2021-10-05</h2>
|
|
<ul>
|
|
<li>Sam put me in touch with Dodi from the System Office web team and he confirmed that the Amazon requests are not theirs
|
|
<ul>
|
|
<li>I added <code>Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)</code> to the list of bad bots in nginx</li>
|
|
<li>I purged all the Amazon IPs using this user agent, as well as the few other IPs I identified yesterday</li>
|
|
</ul>
|
|
</li>
|
|
</ul>
|
|
<pre tabindex="0"><code class="language-console" data-lang="console">$ ./ilri/check-spider-ip-hits.sh -f /tmp/robot-ips.txt -p
|
|
...
|
|
|
|
Total number of bot hits purged: 465119
|
|
</code></pre><h2 id="2021-10-06">2021-10-06</h2>
|
|
<ul>
|
|
<li>Thinking about how we could check for duplicates before importing
|
|
<ul>
|
|
<li>I found out that <a href="https://www.freecodecamp.org/news/fuzzy-string-matching-with-postgresql/">PostgreSQL has a built-in similarity function</a>:</li>
|
|
</ul>
|
|
</li>
|
|
</ul>
|
|
<pre tabindex="0"><code class="language-console" data-lang="console">localhost/dspace63= > CREATE EXTENSION pg_trgm;
|
|
localhost/dspace63= > SELECT metadata_value_id, text_value, dspace_object_id FROM metadatavalue WHERE dspace_object_id IN (SELECT uuid FROM item) AND metadata_field_id=64 AND SIMILARITY(text_value,'Molecular marker based genetic diversity assessment of Striga resistant maize inbred lines') > 0.5;
|
|
metadata_value_id │ text_value │ dspace_object_id
|
|
───────────────────┼────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────────────────────
|
|
3652624 │ Molecular marker based genetic diversity assessment of Striga resistant maize inbred lines │ b7f0bf12-b183-4b2f-bbd2-7a5697b0c467
|
|
3677663 │ Molecular marker based genetic diversity assessment of Striga resistant maize inbred lines │ fb62f551-f4a5-4407-8cdc-6bff6dac399e
|
|
(2 rows)
|
|
</code></pre><ul>
|
|
<li>I was able to find an exact duplicate for an IITA item by searching for its title (I already knew that these existed)</li>
|
|
<li>I started working on a basic Python script to do this and managed to find an actual duplicate in the recent IWMI items
|
|
<ul>
|
|
<li>I think I will check for similar titles, and if I find them I will print out the handles for verification</li>
|
|
<li>I could also proceed to check other metadata like type because those shouldn’t vary too much</li>
|
|
</ul>
|
|
</li>
|
|
<li>I ran my new <code>check-duplicates.py</code> script on the 292 non-IWMI publications from Udana and found twelve potential duplicates
|
|
<ul>
|
|
<li>Upon checking them manually, I found that 7/12 were indeed already present on CGSpace!</li>
|
|
<li>This is with the similarity threshold at 0.5. I wonder if tweaking that higher will make the script run faster and eliminate some false positives</li>
|
|
<li>I re-ran it with higher thresholds; this eliminated all false positives, but it still took 24 minutes to run for 292 items!
|
|
<ul>
|
|
<li>0.6: ./ilri/check-duplicates.py -i ~/Downloads/2021-10-03-non-IWMI-publications.cs 0.09s user 0.03s system 0% cpu 24:40.42 total</li>
|
|
<li>0.7: ./ilri/check-duplicates.py -i ~/Downloads/2021-10-03-non-IWMI-publications.cs 0.12s user 0.03s system 0% cpu 24:29.15 total</li>
|
|
<li>0.8: ./ilri/check-duplicates.py -i ~/Downloads/2021-10-03-non-IWMI-publications.cs 0.09s user 0.03s system 0% cpu 25:44.13 total</li>
|
|
</ul>
|
|
</li>
|
|
</ul>
|
|
</li>
|
|
<li>Some minor updates to csv-metadata-quality
|
|
<ul>
|
|
<li>Fix two issues with regular expressions in the duplicate items and experimental language checks</li>
|
|
<li>Add a check for items that have a DOI listed in their citation, but are missing a standalone DOI field</li>
|
|
</ul>
|
|
</li>
|
|
<li>Then I ran this new version of csv-metadata-quality on an export of IWMI’s community, minus some fields I don’t want to check:</li>
|
|
</ul>
|
|
<pre tabindex="0"><code class="language-console" data-lang="console">$ csvcut -C 'dc.date.accessioned,dc.date.accessioned[],dc.date.accessioned[en_US],dc.date.available,dc.date.available[],dc.date.available[en_US],dcterms.issued[en_US],dcterms.issued[],dcterms.issued,dc.description.provenance[en],dc.description.provenance[en_US],dc.identifier.uri,dc.identifier.uri[],dc.identifier.uri[en_US],dcterms.abstract[en_US],dcterms.bibliographicCitation[en_US],collection' ~/Downloads/iwmi.csv > /tmp/iwmi-to-check.csv
|
|
$ csv-metadata-quality -i /tmp/iwmi-to-check.csv -o /tmp/iwmi.csv | tee /tmp/out.log
|
|
$ xsv split -s 2000 /tmp /tmp/iwmi.csv
|
|
</code></pre><ul>
|
|
<li>I noticed each CSV only had 10 or 20 corrections, mostly that none of the duplicate metadata values were removed in the CSVs…
|
|
<ul>
|
|
<li>I cut a subset of the fields from the main CSV and tried again, but DSpace said “no changes detected”</li>
|
|
<li>The duplicates are definitely removed from the CSV, but DSpace doesn’t detect them</li>
|
|
<li>I realized this is an issue I’ve had before, but forgot because I usually use csv-metadata-quality for new items, not ones already inside DSpace!</li>
|
|
<li>I found a comment on a thread on the dspace-tech mailing list from helix84 in 2015 (“No changes were detected” when importing metadata via XMLUI) where he says:</li>
|
|
</ul>
|
|
</li>
|
|
</ul>
|
|
<blockquote>
|
|
<p>It’s very likely that multiple values in a single field are being compared as an unordered set rather than an ordered list.
|
|
Try doing it in two imports. In first import, remove all authors. In second import, add them in the new order.</p>
|
|
</blockquote>
|
|
<ul>
|
|
<li>Shit, so that’s worth looking into…</li>
|
|
</ul>
|
|
<h2 id="2021-10-07">2021-10-07</h2>
|
|
<ul>
|
|
<li>I decided to upload the cleaned IWMI community by moving the cleaned metadata field from <code>dcterms.subject[en_US]</code> to <code>dcterms.subject[en_Fu]</code> temporarily, uploading them, then moving them back, and uploading again
|
|
<ul>
|
|
<li>I started by copying just a handful of fields from the iwmi.csv community export:</li>
|
|
</ul>
|
|
</li>
|
|
</ul>
|
|
<pre tabindex="0"><code class="language-console" data-lang="console">$ csvcut -c 'id,cg.contributor.affiliation[en_US],cg.coverage.country[en_US],cg.coverage.iso3166-alpha2[en_US],cg.coverage.subregion[en_US],cg.identifier.doi[en_US],cg.identifier.iwmilibrary[en_US],cg.identifier.url[en_US],cg.isijournal[en_US],cg.issn[en_US],cg.river.basin[en_US],dc.contributor.author[en_US],dcterms.subject[en_US]' ~/Downloads/iwmi.csv > /tmp/iwmi-duplicate-metadata.csv
|
|
# Copy and blank columns in OpenRefine
|
|
$ csv-metadata-quality -i ~/Downloads/2021-10-07-IWMI-duplicate-metadata-csv.csv -o /tmp/iwmi-duplicates-cleaned.csv | tee /tmp/out.log
|
|
$ xsv split -s 2000 /tmp /tmp/iwmi-duplicates-cleaned.csv
|
|
</code></pre><ul>
|
|
<li>It takes a few hours per 2,000 items because DSpace processes them so slowly… sigh…</li>
|
|
</ul>
|
|
<h2 id="2021-10-08">2021-10-08</h2>
|
|
<ul>
|
|
<li>I decided to update these records in PostgreSQL instead of via several CSV batches, as there were several others to normalize too:</li>
|
|
</ul>
|
|
<pre tabindex="0"><code class="language-console" data-lang="console">cgspace=# SELECT DISTINCT text_lang, count(text_lang) FROM metadatavalue WHERE dspace_object_id IN (SELECT uuid FROM item) GROUP BY text_lang ORDER BY count DESC;
|
|
text_lang | count
|
|
-----------+---------
|
|
en_US | 2603711
|
|
en_Fu | 115568
|
|
en | 8818
|
|
| 5286
|
|
fr | 2
|
|
vn | 2
|
|
| 0
|
|
(7 rows)
|
|
cgspace=# BEGIN;
|
|
cgspace=# UPDATE metadatavalue SET text_lang='en_US' WHERE dspace_object_id IN (SELECT uuid FROM item) AND text_lang IN ('en_Fu', 'en', '');
|
|
UPDATE 129673
|
|
cgspace=# COMMIT;
|
|
</code></pre><ul>
|
|
<li>So all this effort to remove ~400 duplicate metadata values in the IWMI community hmmm:</li>
|
|
</ul>
|
|
<pre tabindex="0"><code class="language-console" data-lang="console">$ grep -c 'Removing duplicate value' /tmp/out.log
|
|
391
|
|
</code></pre><ul>
|
|
<li>I tried to export ILRI’s community, but ran into the export bug (DS-4211)
|
|
<ul>
|
|
<li>After applying the patch on my local instance I was able to export, but found many duplicate items in the CSV (as I also noticed in 2021-02):</li>
|
|
</ul>
|
|
</li>
|
|
</ul>
|
|
<pre tabindex="0"><code class="language-console" data-lang="console">$ csvcut -c id /tmp/ilri-duplicate-metadata.csv | sed '1d' | wc -l
|
|
32070
|
|
$ csvcut -c id /tmp/ilri-duplicate-metadata.csv | sort -u | sed '1d' | wc -l
|
|
19315
|
|
</code></pre><ul>
|
|
<li>It seems there are only about 200 duplicate values in this subset of fields in ILRI’s community:</li>
|
|
</ul>
|
|
<pre tabindex="0"><code class="language-console" data-lang="console">$ grep -c 'Removing duplicate value' /tmp/out.log
|
|
220
|
|
</code></pre><ul>
|
|
<li>I found a cool way to select only the items with corrections
|
|
<ul>
|
|
<li>First, extract a handful of fields from the CSV with csvcut</li>
|
|
<li>Second, clean the CSV with csv-metadata-quality</li>
|
|
<li>Third, rename the columns to something obvious in the cleaned CSV</li>
|
|
<li>Fourth, use csvjoin to merge the cleaned file with the original</li>
|
|
</ul>
|
|
</li>
|
|
</ul>
|
|
<pre tabindex="0"><code class="language-console" data-lang="console">$ csvcut -c 'id,cg.contributor.affiliation[en_US],cg.coverage.country[en_US],cg.coverage.iso3166-alpha2[en_US],cg.coverage.subregion[en_US],cg.identifier.doi[en_US],cg.identifier.url[en_US],cg.isijournal[en_US],cg.issn[en_US],dc.contributor.author[en_US],dcterms.subject[en_US]' /tmp/ilri.csv | csvsort | uniq > /tmp/ilri-deduplicated-items.csv
|
|
$ csv-metadata-quality -i /tmp/ilri-deduplicated-items.csv -o /tmp/ilri-deduplicated-items-cleaned.csv | tee /tmp/out.log
|
|
$ sed -i -e '1s/en_US/en_Fu/g' /tmp/ilri-deduplicated-items-cleaned.csv
|
|
$ csvjoin -c id /tmp/ilri-deduplicated-items.csv /tmp/ilri-deduplicated-items-cleaned.csv > /tmp/ilri-deduplicated-items-cleaned-joined.csv
|
|
</code></pre><ul>
|
|
<li>Then I imported the file into OpenRefine and used a custom text facet with a GREL like this to identify the rows with changes:</li>
|
|
</ul>
|
|
<pre tabindex="0"><code>if(cells['dcterms.subject[en_US]'].value == cells['dcterms.subject[en_Fu]'].value,"same","different")
|
|
</code></pre><ul>
|
|
<li>For these rows I starred them and then blanked out the original field so DSpace would see it as a removal, and add the new column
|
|
<ul>
|
|
<li>After these are uploaded I will normalize the <code>text_lang</code> fields in PostgreSQL again</li>
|
|
</ul>
|
|
</li>
|
|
<li>I did the same for CIAT but there were over 7,000 duplicate metadata values! Hard to believe:</li>
|
|
</ul>
|
|
<pre tabindex="0"><code class="language-console" data-lang="console">$ grep -c 'Removing duplicate value' /tmp/out.log
|
|
7720
|
|
</code></pre><ul>
|
|
<li>I applied these to the CIAT community, so in total that’s over 8,000 duplicate metadata values removed in a handful of fields…</li>
|
|
</ul>
|
|
<h2 id="2021-10-09">2021-10-09</h2>
|
|
<ul>
|
|
<li>I did similar metadata cleanups for CCAFS and IITA too, but there were only a few hundred duplicates there</li>
|
|
<li>Also of note, there are some other fixes too, for example in IITA’s community:</li>
|
|
</ul>
|
|
<pre tabindex="0"><code class="language-console" data-lang="console">$ grep -c -E '(Fixing|Removing) (duplicate|excessive|invalid)' /tmp/out.log
|
|
249
|
|
</code></pre><ul>
|
|
<li>I ran a full Discovery re-indexing on CGSpace</li>
|
|
<li>Then I exported all of CGSpace and extracted the ISSNs and ISBNs:</li>
|
|
</ul>
|
|
<pre tabindex="0"><code class="language-console" data-lang="console">$ csvcut -c 'id,cg.issn[en_US],dc.identifier.issn[en_US],cg.isbn[en_US],dc.identifier.isbn[en_US]' /tmp/cgspace.csv > /tmp/cgspace-issn-isbn.csv
|
|
</code></pre><ul>
|
|
<li>I did cleanups on about seventy items with invalid and mixed ISSNs/ISBNs</li>
|
|
</ul>
|
|
<h2 id="2021-10-10">2021-10-10</h2>
|
|
<ul>
|
|
<li>Start testing DSpace 7.1-SNAPSHOT to see if it has the duplicate item bug on <code>metadata-export</code> (DS-4211)</li>
|
|
<li>First create a new PostgreSQL 13 container:</li>
|
|
</ul>
|
|
<pre tabindex="0"><code class="language-console" data-lang="console">$ podman run --name dspacedb13 -v dspacedb13_data:/var/lib/postgresql/data -e POSTGRES_PASSWORD=postgres -p 5433:5432 -d postgres:13-alpine
|
|
$ createuser -h localhost -p 5433 -U postgres --pwprompt dspacetest
|
|
$ createdb -h localhost -p 5433 -U postgres -O dspacetest --encoding=UNICODE dspace7
|
|
$ psql -h localhost -p 5433 -U postgres dspace7 -c 'CREATE EXTENSION pgcrypto;'
|
|
</code></pre><ul>
|
|
<li>Then edit setting in <code>dspace/config/local.cfg</code> and build the backend server with Java 11:</li>
|
|
</ul>
|
|
<pre tabindex="0"><code class="language-console" data-lang="console">$ mvn package
|
|
$ cd dspace/target/dspace-installer
|
|
$ ant fresh_install
|
|
# fix database not being fully ready, causing Tomcat to fail to start the server application
|
|
$ ~/dspace7/bin/dspace database migrate
|
|
</code></pre><ul>
|
|
<li>Copy Solr configs and start Solr:</li>
|
|
</ul>
|
|
<pre tabindex="0"><code class="language-console" data-lang="console">$ cp -Rv ~/dspace7/solr/* ~/src/solr-8.8.2/server/solr/configsets
|
|
$ ~/src/solr-8.8.2/bin/solr start
|
|
</code></pre><ul>
|
|
<li>Start my local Tomcat 9 instance:</li>
|
|
</ul>
|
|
<pre tabindex="0"><code class="language-console" data-lang="console">$ systemctl --user start tomcat9@dspace7
|
|
</code></pre><ul>
|
|
<li>This works, so now I will drop the default database and import a dump from CGSpace</li>
|
|
</ul>
|
|
<pre tabindex="0"><code class="language-console" data-lang="console">$ systemctl --user stop tomcat9@dspace7
|
|
$ dropdb -h localhost -p 5433 -U postgres dspace7
|
|
$ createdb -h localhost -p 5433 -U postgres -O dspacetest --encoding=UNICODE dspace7
|
|
$ psql -h localhost -p 5433 -U postgres -c 'alter user dspacetest superuser;'
|
|
$ pg_restore -h localhost -p 5433 -U postgres -d dspace7 -O --role=dspacetest -h localhost dspace-2021-10-09.backup
|
|
$ psql -h localhost -p 5433 -U postgres -c 'alter user dspacetest nosuperuser;'
|
|
</code></pre><ul>
|
|
<li>Delete Atmire migrations and some others that were “unresolved”:</li>
|
|
</ul>
|
|
<pre tabindex="0"><code class="language-console" data-lang="console">$ psql -h localhost -p 5433 -U postgres dspace7 -c "DELETE FROM schema_version WHERE description LIKE '%Atmire%' OR description LIKE '%CUA%' OR description LIKE '%cua%';"
|
|
$ psql -h localhost -p 5433 -U postgres dspace7 -c "DELETE FROM schema_version WHERE version IN ('5.0.2017.09.25', '6.0.2017.01.30', '6.0.2017.09.25');"
|
|
</code></pre><ul>
|
|
<li>Now DSpace 7 starts with my CGSpace data… nice</li>
|
|
<li>I tested the <code>metadata-export</code> on DSpace 7.1-SNAPSHOT and it still has the duplicate items issue introduced by DS-4211
|
|
<ul>
|
|
<li>I filed a GitHub issue and notified nwoodward: <a href="https://github.com/DSpace/DSpace/issues/7988">https://github.com/DSpace/DSpace/issues/7988</a></li>
|
|
</ul>
|
|
</li>
|
|
<li>Start a full reindex on AReS</li>
|
|
</ul>
|
|
<!-- raw HTML omitted -->
|
|
|
|
|
|
|
|
|
|
|
|
</article>
|
|
|
|
|
|
|
|
</div> <!-- /.blog-main -->
|
|
|
|
<aside class="col-sm-3 ml-auto blog-sidebar">
|
|
|
|
|
|
|
|
<section class="sidebar-module">
|
|
<h4>Recent Posts</h4>
|
|
<ol class="list-unstyled">
|
|
|
|
|
|
<li><a href="/cgspace-notes/2021-10/">October, 2021</a></li>
|
|
|
|
<li><a href="/cgspace-notes/2021-09/">September, 2021</a></li>
|
|
|
|
<li><a href="/cgspace-notes/2021-08/">August, 2021</a></li>
|
|
|
|
<li><a href="/cgspace-notes/2021-07/">July, 2021</a></li>
|
|
|
|
<li><a href="/cgspace-notes/2021-06/">June, 2021</a></li>
|
|
|
|
</ol>
|
|
</section>
|
|
|
|
|
|
|
|
|
|
<section class="sidebar-module">
|
|
<h4>Links</h4>
|
|
<ol class="list-unstyled">
|
|
|
|
<li><a href="https://cgspace.cgiar.org">CGSpace</a></li>
|
|
|
|
<li><a href="https://dspacetest.cgiar.org">DSpace Test</a></li>
|
|
|
|
<li><a href="https://github.com/ilri/DSpace">CGSpace @ GitHub</a></li>
|
|
|
|
</ol>
|
|
</section>
|
|
|
|
</aside>
|
|
|
|
|
|
</div> <!-- /.row -->
|
|
</div> <!-- /.container -->
|
|
|
|
|
|
|
|
<footer class="blog-footer">
|
|
<p dir="auto">
|
|
|
|
Blog template created by <a href="https://twitter.com/mdo">@mdo</a>, ported to Hugo by <a href="https://twitter.com/mralanorth">@mralanorth</a>.
|
|
|
|
</p>
|
|
<p>
|
|
<a href="#">Back to top</a>
|
|
</p>
|
|
</footer>
|
|
|
|
|
|
</body>
|
|
|
|
</html>
|