diff --git a/content/post/2017-11.md b/content/post/2017-11.md
index 65d509cc5..3cc4e1609 100644
--- a/content/post/2017-11.md
+++ b/content/post/2017-11.md
@@ -377,7 +377,7 @@ $ grep -Io -E 'session_id=[A-Z0-9]{32}:ip_addr=104.196.152.243' dspace.log.2017-
 
 - I emailed CIAT about the session issue, user agent issue, and told them they should not scrape the HTML contents of communities, instead using the REST API
 - About Baidu, I found a link to their [robots.txt tester tool](http://ziyuan.baidu.com/robots/)
-- It seems like our robots.txt file is valid, and they claim to recognize that URLs like `/discover` should be forbidden:
+- It seems like our robots.txt file is valid, and they claim to recognize that URLs like `/discover` should be forbidden (不允许, aka "not allowed"):
 
 ![Baidu robots.txt tester](/cgspace-notes/2017/11/baidu-robotstxt.png)
 
diff --git a/public/2017-11/index.html b/public/2017-11/index.html
index 527581e3c..c7ad416e1 100644
--- a/public/2017-11/index.html
+++ b/public/2017-11/index.html
@@ -38,7 +38,7 @@ COPY 54701
-
+
@@ -86,9 +86,9 @@ COPY 54701
     "@type": "BlogPosting",
     "headline": "November, 2017",
     "url": "https://alanorth.github.io/cgspace-notes/2017-11/",
-    "wordCount": "2084",
+    "wordCount": "2118",
     "datePublished": "2017-11-02T09:37:54+02:00",
-    "dateModified": "2017-11-07T17:26:16+02:00",
+    "dateModified": "2017-11-07T18:09:29+02:00",
     "author": {
       "@type": "Person",
       "name": "Alan Orth"
@@ -566,7 +566,7 @@ $ grep -Io -E 'session_id=[A-Z0-9]{32}:ip_addr=104.196.152.243' dspace.log.2017-
-It seems like our robots.txt file is valid, and they claim to recognize that URLs like /discover should be forbidden:
+It seems like our robots.txt file is valid, and they claim to recognize that URLs like /discover should be forbidden (不允许, aka “not allowed”):
+# grep "Baiduspider/2.0" /var/log/nginx/access.log | awk '{print $1}' | sort -n | uniq | wc -l
+164
+
+
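The 164 above is just the number of distinct client IPs that sent a Baiduspider/2.0 user agent, which by itself doesn't prove the traffic is really Baidu's. A minimal sketch of one way to check, assuming the same nginx access log and that genuine Baidu crawlers reverse-resolve to hostnames under crawl.baidu.com:

```
# Sketch: print each IP claiming to be Baiduspider/2.0 together with its PTR record.
# Genuine Baidu crawlers resolve to names like baiduspider-180-76-15-xxx.crawl.baidu.com;
# IPs that resolve elsewhere (or not at all) are likely just spoofing the user agent.
for ip in $(grep "Baiduspider/2.0" /var/log/nginx/access.log | awk '{print $1}' | sort -n | uniq); do
    echo -n "$ip -> "
    host "$ip" | awk '{print $NF}'
done
```
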
diff --git a/public/sitemap.xml b/public/sitemap.xml
index e653d34e3..8e35571fb 100644
--- a/public/sitemap.xml
+++ b/public/sitemap.xml
@@ -4,7 +4,7 @@
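On the CIAT note above: instead of scraping the XMLUI community pages, the same data is available as JSON from DSpace's REST API. A minimal sketch, assuming the stock DSpace 5 REST endpoints are deployed at /rest on CGSpace and using placeholder limit/offset values:

```
# Sketch: fetch community and item metadata as JSON rather than crawling the HTML UI.
# /rest/communities and /rest/items are standard DSpace 5.x REST API endpoints;
# paging with limit/offset keeps individual requests small.
$ curl -s -H "Accept: application/json" "https://cgspace.cgiar.org/rest/communities"
$ curl -s -H "Accept: application/json" "https://cgspace.cgiar.org/rest/items?limit=100&offset=0"
```

The responses include stable IDs and handles, so a harvester doesn't depend on the HTML layout of the community and collection pages.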