mirror of
https://github.com/ai-robots-txt/ai.robots.txt.git
synced 2025-12-29 12:18:33 +01:00
Compare commits
138 commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
aa8519ec10 | ||
|
|
83485effdb | ||
|
|
8b8bf9da5d | ||
|
|
f1c752ef12 | ||
|
|
51afa7113a |
||
|
|
7598d77e4a | ||
|
|
45b071b29f | ||
|
|
f61b3496f7 | ||
|
|
8363d4fdd4 | ||
|
|
2ccd443581 |
||
|
|
6d75f3c1c9 |
||
|
|
56010ef913 |
||
|
|
3fadc88a23 | ||
|
|
47c077a8ef |
||
|
|
f5d7ccb243 |
||
|
|
30d719a09a |
||
|
|
05bbdebeaa | ||
|
|
c6ce9329a1 | ||
|
|
10d5ae2870 | ||
|
|
4467002298 |
||
|
|
4a159a818f |
||
|
|
3d6b33a71a |
||
|
|
c26c8c0911 | ||
|
|
91959fe791 |
||
|
|
b75163e796 |
||
|
|
f46754d280 |
||
|
|
c3f2fe758e |
||
|
|
7521c3af50 |
||
|
|
4302fd1aca | ||
|
|
9ca5033927 | ||
|
|
9cc3dbc05f | ||
|
|
c322d7852d |
||
|
|
35588b2ddb |
||
|
|
ab24e41106 | ||
|
|
b681c0c0d8 | ||
|
|
4e7e28335f |
||
|
|
a6cf6b204b | ||
|
|
2679fcad34 | ||
|
|
ef8eda4fe6 | ||
|
|
a29102f0fc |
||
|
|
0b3266b35f | ||
|
|
6225e3e98e | ||
|
|
be4d74412c | ||
|
|
729be4693a |
||
|
|
efb4d260da | ||
|
|
e2726ac160 | ||
|
|
663d030f96 |
||
|
|
28b45ea08d |
||
|
|
443dd27527 | ||
|
|
60b6a0829d |
||
|
|
00bf2b0e13 |
||
|
|
e87eb706e3 | ||
|
|
3d41350256 |
||
|
|
808451055c |
||
|
|
5cad0ee389 | ||
|
|
d55c9980cd |
||
|
|
192b0a2eef |
||
|
|
97e19445ce | ||
|
|
0bc2361be8 | ||
|
|
511d8c955d |
||
|
|
b89a9eae6a | ||
|
|
646ab08e15 | ||
|
|
5c3da1c1af |
||
|
|
d22b8dfd7b |
||
|
|
19c1d346c3 |
||
|
|
2fa0e9119c | ||
|
|
0874a92503 |
||
|
|
28d2d09633 |
||
|
|
260f5029fe | ||
|
|
91bf905fa9 | ||
|
|
56d03d46fb | ||
|
|
38d60b928c | ||
|
|
e2266bbc1d |
||
|
|
bf347bdf91 |
||
|
|
c6e7d69dd5 |
||
|
|
8906f6b447 | ||
|
|
2fd93029ca | ||
|
|
b6338ddc73 | ||
|
|
7ffbf33baf |
||
|
|
50870ba911 | ||
|
|
dd391bf960 | ||
|
|
229d1b4dbc | ||
|
|
4d506ca322 |
||
|
|
ec508ab434 |
||
|
|
0ed29412c9 | ||
|
|
2ad1c3e831 | ||
|
|
1677278c5a |
||
|
|
1a8edfa84a |
||
|
|
cf073d49f2 | ||
|
|
6d3f3e1712 | ||
|
|
784b8440a5 |
||
|
|
0e687a5b58 |
||
|
|
ff9fc26404 | ||
|
|
146a229662 | ||
|
|
64f9d6ce9c | ||
|
|
085dd1071e | ||
|
|
9565c11d4c | ||
|
|
8869442615 |
||
|
|
27420d6fed | ||
|
|
44e58a5ece |
||
|
|
12c5368e04 |
||
|
|
c15065544a |
||
|
|
9171625db6 |
||
|
|
8b188d0612 | ||
|
|
47769c8429 | ||
|
|
9843c31303 | ||
|
|
567e94a6cf | ||
|
|
30c4c037d9 |
||
|
|
c7407e721d |
||
|
|
f6f504012f | ||
|
|
63f1f56307 | ||
|
|
91a5b3c995 |
||
|
|
c5dd4e98b6 |
||
|
|
70ff0c9fbd | ||
|
|
20a242d390 | ||
|
|
91ad302d68 |
||
|
|
79bc22523f | ||
|
|
2bfcee794d | ||
|
|
7eb2099d3f | ||
|
|
accc48f327 |
||
|
|
4ed17b8e4a | ||
|
|
5326c202b5 | ||
|
|
a31ae1e6d0 |
||
|
|
7535893aec | ||
|
|
eb05f2f527 | ||
|
|
26a46c409d |
||
|
|
2b68568ac2 | ||
|
|
b05f2fee00 |
||
|
|
e53d81c66d | ||
|
|
20e327e74e |
||
|
|
8f17718e76 |
||
|
|
d760f9216f |
||
|
|
842e2256e8 | ||
|
|
229ea20426 |
||
|
|
14d68f05ba |
||
|
|
6c3ae6eb20 |
||
|
|
684d11d889 | ||
|
|
9b5f75e2f3 |
15 changed files with 733 additions and 58 deletions
9
.editorconfig
Normal file
9
.editorconfig
Normal file
|
|
@ -0,0 +1,9 @@
|
||||||
|
root = true
|
||||||
|
|
||||||
|
[*]
|
||||||
|
end_of_line = lf
|
||||||
|
insert_final_newline = true
|
||||||
|
trim_trailing_whitespace = true
|
||||||
|
|
||||||
|
[{Caddyfile,haproxy-block-ai-bots.txt,nginx-block-ai-bots.conf}]
|
||||||
|
insert_final_newline = false
|
||||||
4
.github/workflows/ai_robots_update.yml
vendored
4
.github/workflows/ai_robots_update.yml
vendored
|
|
@ -2,6 +2,7 @@ name: Updates for AI robots files
|
||||||
on:
|
on:
|
||||||
schedule:
|
schedule:
|
||||||
- cron: "0 0 * * *"
|
- cron: "0 0 * * *"
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
dark-visitors:
|
dark-visitors:
|
||||||
|
|
@ -10,7 +11,7 @@ jobs:
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 2
|
fetch-depth: 0
|
||||||
- run: |
|
- run: |
|
||||||
pip install beautifulsoup4 requests
|
pip install beautifulsoup4 requests
|
||||||
git config --global user.name "dark-visitors"
|
git config --global user.name "dark-visitors"
|
||||||
|
|
@ -22,6 +23,7 @@ jobs:
|
||||||
git add -A
|
git add -A
|
||||||
if ! git diff --cached --quiet; then
|
if ! git diff --cached --quiet; then
|
||||||
git commit -m "Update from Dark Visitors"
|
git commit -m "Update from Dark Visitors"
|
||||||
|
git rebase origin/main
|
||||||
git push
|
git push
|
||||||
else
|
else
|
||||||
echo "No changes to commit."
|
echo "No changes to commit."
|
||||||
|
|
|
||||||
|
|
@ -1,3 +1,3 @@
|
||||||
RewriteEngine On
|
RewriteEngine On
|
||||||
RewriteCond %{HTTP_USER_AGENT} (AI2Bot|Ai2Bot\-Dolma|aiHitBot|Amazonbot|Andibot|anthropic\-ai|Applebot|Applebot\-Extended|bedrockbot|Brightbot\ 1\.0|Bytespider|CCBot|ChatGPT\-User|Claude\-SearchBot|Claude\-User|Claude\-Web|ClaudeBot|cohere\-ai|cohere\-training\-data\-crawler|Cotoyogi|Crawlspace|Diffbot|DuckAssistBot|EchoboxBot|FacebookBot|Factset_spyderbot|FirecrawlAgent|FriendlyCrawler|Google\-CloudVertexBot|Google\-Extended|GoogleOther|GoogleOther\-Image|GoogleOther\-Video|GPTBot|iaskspider/2\.0|ICC\-Crawler|ImagesiftBot|img2dataset|ISSCyberRiskCrawler|Kangaroo\ Bot|meta\-externalagent|Meta\-ExternalAgent|meta\-externalfetcher|Meta\-ExternalFetcher|MistralAI\-User/1\.0|NovaAct|OAI\-SearchBot|omgili|omgilibot|Operator|PanguBot|Panscient|panscient\.com|Perplexity\-User|PerplexityBot|PetalBot|PhindBot|QualifiedBot|QuillBot|quillbot\.com|SBIntuitionsBot|Scrapy|SemrushBot\-OCOB|SemrushBot\-SWA|Sidetrade\ indexer\ bot|TikTokSpider|Timpibot|VelenPublicWebCrawler|Webzio\-Extended|wpbot|YandexAdditional|YandexAdditionalBot|YouBot) [NC]
|
RewriteCond %{HTTP_USER_AGENT} (AddSearchBot|AI2Bot|AI2Bot\-DeepResearchEval|Ai2Bot\-Dolma|aiHitBot|amazon\-kendra|Amazonbot|AmazonBuyForMe|Andibot|Anomura|anthropic\-ai|Applebot|Applebot\-Extended|atlassian\-bot|Awario|bedrockbot|bigsur\.ai|Bravebot|Brightbot\ 1\.0|BuddyBot|Bytespider|CCBot|Channel3Bot|ChatGLM\-Spider|ChatGPT\ Agent|ChatGPT\-User|Claude\-SearchBot|Claude\-User|Claude\-Web|ClaudeBot|Cloudflare\-AutoRAG|CloudVertexBot|cohere\-ai|cohere\-training\-data\-crawler|Cotoyogi|Crawl4AI|Crawlspace|Datenbank\ Crawler|DeepSeekBot|Devin|Diffbot|DuckAssistBot|Echobot\ Bot|EchoboxBot|FacebookBot|facebookexternalhit|Factset_spyderbot|FirecrawlAgent|FriendlyCrawler|Gemini\-Deep\-Research|Google\-CloudVertexBot|Google\-Extended|Google\-Firebase|Google\-NotebookLM|GoogleAgent\-Mariner|GoogleOther|GoogleOther\-Image|GoogleOther\-Video|GPTBot|iAskBot|iaskspider|iaskspider/2\.0|IbouBot|ICC\-Crawler|ImagesiftBot|imageSpider|img2dataset|ISSCyberRiskCrawler|Kangaroo\ Bot|KlaviyoAIBot|KunatoCrawler|laion\-huggingface\-processor|LAIONDownloader|LCC|LinerBot|Linguee\ Bot|LinkupBot|Manus\-User|meta\-externalagent|Meta\-ExternalAgent|meta\-externalfetcher|Meta\-ExternalFetcher|meta\-webindexer|MistralAI\-User|MistralAI\-User/1\.0|MyCentralAIScraperBot|netEstate\ Imprint\ Crawler|NotebookLM|NovaAct|OAI\-SearchBot|omgili|omgilibot|OpenAI|Operator|PanguBot|Panscient|panscient\.com|Perplexity\-User|PerplexityBot|PetalBot|PhindBot|Poggio\-Citations|Poseidon\ Research\ Crawler|QualifiedBot|QuillBot|quillbot\.com|SBIntuitionsBot|Scrapy|SemrushBot\-OCOB|SemrushBot\-SWA|ShapBot|Sidetrade\ indexer\ bot|Spider|TavilyBot|TerraCotta|Thinkbot|TikTokSpider|Timpibot|TwinAgent|VelenPublicWebCrawler|WARDBot|Webzio\-Extended|webzio\-extended|wpbot|WRTNBot|YaK|YandexAdditional|YandexAdditionalBot|YouBot|ZanistaBot) [NC]
|
||||||
RewriteRule !^/?robots\.txt$ - [F,L]
|
RewriteRule !^/?robots\.txt$ - [F]
|
||||||
|
|
|
||||||
|
|
@ -1,3 +1,3 @@
|
||||||
@aibots {
|
@aibots {
|
||||||
header_regexp User-Agent "(AI2Bot|Ai2Bot\-Dolma|aiHitBot|Amazonbot|Andibot|anthropic\-ai|Applebot|Applebot\-Extended|bedrockbot|Brightbot\ 1\.0|Bytespider|CCBot|ChatGPT\-User|Claude\-SearchBot|Claude\-User|Claude\-Web|ClaudeBot|cohere\-ai|cohere\-training\-data\-crawler|Cotoyogi|Crawlspace|Diffbot|DuckAssistBot|EchoboxBot|FacebookBot|Factset_spyderbot|FirecrawlAgent|FriendlyCrawler|Google\-CloudVertexBot|Google\-Extended|GoogleOther|GoogleOther\-Image|GoogleOther\-Video|GPTBot|iaskspider/2\.0|ICC\-Crawler|ImagesiftBot|img2dataset|ISSCyberRiskCrawler|Kangaroo\ Bot|meta\-externalagent|Meta\-ExternalAgent|meta\-externalfetcher|Meta\-ExternalFetcher|MistralAI\-User/1\.0|NovaAct|OAI\-SearchBot|omgili|omgilibot|Operator|PanguBot|Panscient|panscient\.com|Perplexity\-User|PerplexityBot|PetalBot|PhindBot|QualifiedBot|QuillBot|quillbot\.com|SBIntuitionsBot|Scrapy|SemrushBot\-OCOB|SemrushBot\-SWA|Sidetrade\ indexer\ bot|TikTokSpider|Timpibot|VelenPublicWebCrawler|Webzio\-Extended|wpbot|YandexAdditional|YandexAdditionalBot|YouBot)"
|
header_regexp User-Agent "(AddSearchBot|AI2Bot|AI2Bot\-DeepResearchEval|Ai2Bot\-Dolma|aiHitBot|amazon\-kendra|Amazonbot|AmazonBuyForMe|Andibot|Anomura|anthropic\-ai|Applebot|Applebot\-Extended|atlassian\-bot|Awario|bedrockbot|bigsur\.ai|Bravebot|Brightbot\ 1\.0|BuddyBot|Bytespider|CCBot|Channel3Bot|ChatGLM\-Spider|ChatGPT\ Agent|ChatGPT\-User|Claude\-SearchBot|Claude\-User|Claude\-Web|ClaudeBot|Cloudflare\-AutoRAG|CloudVertexBot|cohere\-ai|cohere\-training\-data\-crawler|Cotoyogi|Crawl4AI|Crawlspace|Datenbank\ Crawler|DeepSeekBot|Devin|Diffbot|DuckAssistBot|Echobot\ Bot|EchoboxBot|FacebookBot|facebookexternalhit|Factset_spyderbot|FirecrawlAgent|FriendlyCrawler|Gemini\-Deep\-Research|Google\-CloudVertexBot|Google\-Extended|Google\-Firebase|Google\-NotebookLM|GoogleAgent\-Mariner|GoogleOther|GoogleOther\-Image|GoogleOther\-Video|GPTBot|iAskBot|iaskspider|iaskspider/2\.0|IbouBot|ICC\-Crawler|ImagesiftBot|imageSpider|img2dataset|ISSCyberRiskCrawler|Kangaroo\ Bot|KlaviyoAIBot|KunatoCrawler|laion\-huggingface\-processor|LAIONDownloader|LCC|LinerBot|Linguee\ Bot|LinkupBot|Manus\-User|meta\-externalagent|Meta\-ExternalAgent|meta\-externalfetcher|Meta\-ExternalFetcher|meta\-webindexer|MistralAI\-User|MistralAI\-User/1\.0|MyCentralAIScraperBot|netEstate\ Imprint\ Crawler|NotebookLM|NovaAct|OAI\-SearchBot|omgili|omgilibot|OpenAI|Operator|PanguBot|Panscient|panscient\.com|Perplexity\-User|PerplexityBot|PetalBot|PhindBot|Poggio\-Citations|Poseidon\ Research\ Crawler|QualifiedBot|QuillBot|quillbot\.com|SBIntuitionsBot|Scrapy|SemrushBot\-OCOB|SemrushBot\-SWA|ShapBot|Sidetrade\ indexer\ bot|Spider|TavilyBot|TerraCotta|Thinkbot|TikTokSpider|Timpibot|TwinAgent|VelenPublicWebCrawler|WARDBot|Webzio\-Extended|webzio\-extended|wpbot|WRTNBot|YaK|YandexAdditional|YandexAdditionalBot|YouBot|ZanistaBot)"
|
||||||
}
|
}
|
||||||
28
README.md
28
README.md
|
|
@ -44,15 +44,34 @@ Note that, as stated in the [httpd documentation](https://httpd.apache.org/docs/
|
||||||
middleware plugin for [Traefik](https://traefik.io/traefik/) to automatically add rules of [robots.txt](./robots.txt)
|
middleware plugin for [Traefik](https://traefik.io/traefik/) to automatically add rules of [robots.txt](./robots.txt)
|
||||||
file on-the-fly.
|
file on-the-fly.
|
||||||
|
|
||||||
|
- Alternatively you can [manually configure Traefik](./docs/traefik-manual-setup.md) to centrally serve a static `robots.txt`.
|
||||||
## Contributing
|
## Contributing
|
||||||
|
|
||||||
A note about contributing: updates should be added/made to `robots.json`. A GitHub action will then generate the updated `robots.txt`, `table-of-bot-metrics.md`, `.htaccess` and `nginx-block-ai-bots.conf`.
|
A note about contributing: updates should be added/made to `robots.json`. A GitHub action will then generate the updated `robots.txt`, `table-of-bot-metrics.md`, `.htaccess` and `nginx-block-ai-bots.conf`.
|
||||||
|
|
||||||
You can run the tests by [installing](https://www.python.org/about/gettingstarted/) Python 3 and issuing:
|
You can run the tests by [installing](https://www.python.org/about/gettingstarted/) Python 3, installing the dependencies:
|
||||||
|
```console
|
||||||
|
pip install -r requirements.txt
|
||||||
|
```
|
||||||
|
and then issuing:
|
||||||
```console
|
```console
|
||||||
code/tests.py
|
code/tests.py
|
||||||
```
|
```
|
||||||
|
|
||||||
|
The `.editorconfig` file provides standard editor options for this project. See [EditorConfig](https://editorconfig.org/) for more information.
|
||||||
|
|
||||||
|
## Releasing
|
||||||
|
|
||||||
|
Admins may ship a new release `v1.n` (where `n` increments the minor version of the current release) as follows:
|
||||||
|
|
||||||
|
* Navigate to the [new release page](https://github.com/ai-robots-txt/ai.robots.txt/releases/new) on GitHub.
|
||||||
|
* Click `Select tag`, choose `Create new tag`, enter `v1.n` in the pop-up, and click `Create`.
|
||||||
|
* Enter a suitable release title (e.g. `v1.n: adds user-agent1, user-agent2`).
|
||||||
|
* Click `Generate release notes`.
|
||||||
|
* Click `Publish release`.
|
||||||
|
|
||||||
|
A GitHub action will then add the asset `robots.txt` to the release. That's it.
|
||||||
|
|
||||||
## Subscribe to updates
|
## Subscribe to updates
|
||||||
|
|
||||||
You can subscribe to list updates via RSS/Atom with the releases feed:
|
You can subscribe to list updates via RSS/Atom with the releases feed:
|
||||||
|
|
@ -65,6 +84,13 @@ You can subscribe with [Feedly](https://feedly.com/i/subscription/feed/https://g
|
||||||
|
|
||||||
Alternatively, you can also subscribe to new releases with your GitHub account by clicking the ⬇️ on "Watch" button at the top of this page, clicking "Custom" and selecting "Releases".
|
Alternatively, you can also subscribe to new releases with your GitHub account by clicking the ⬇️ on "Watch" button at the top of this page, clicking "Custom" and selecting "Releases".
|
||||||
|
|
||||||
|
## License content with RSL
|
||||||
|
|
||||||
|
It is also possible to license your content to AI companies in `robots.txt` using
|
||||||
|
the [Really Simple Licensing](https://rslstandard.org) standard, with an option of
|
||||||
|
collective bargaining. A [plugin](https://github.com/Jameswlepage/rsl-wp) currently
|
||||||
|
implements RSL as well as payment processing for WordPress sites.
|
||||||
|
|
||||||
## Report abusive crawlers
|
## Report abusive crawlers
|
||||||
|
|
||||||
If you use [Cloudflare's hard block](https://blog.cloudflare.com/declaring-your-aindependence-block-ai-bots-scrapers-and-crawlers-with-a-single-click) alongside this list, you can report abusive crawlers that don't respect `robots.txt` [here](https://docs.google.com/forms/d/e/1FAIpQLScbUZ2vlNSdcsb8LyTeSF7uLzQI96s0BKGoJ6wQ6ocUFNOKEg/viewform).
|
If you use [Cloudflare's hard block](https://blog.cloudflare.com/declaring-your-aindependence-block-ai-bots-scrapers-and-crawlers-with-a-single-click) alongside this list, you can report abusive crawlers that don't respect `robots.txt` [here](https://docs.google.com/forms/d/e/1FAIpQLScbUZ2vlNSdcsb8LyTeSF7uLzQI96s0BKGoJ6wQ6ocUFNOKEg/viewform).
|
||||||
|
|
|
||||||
|
|
@ -52,7 +52,16 @@ def updated_robots_json(soup):
|
||||||
for agent in section.find_all("a", href=True):
|
for agent in section.find_all("a", href=True):
|
||||||
name = agent.find("div", {"class": "agent-name"}).get_text().strip()
|
name = agent.find("div", {"class": "agent-name"}).get_text().strip()
|
||||||
name = clean_robot_name(name)
|
name = clean_robot_name(name)
|
||||||
desc = agent.find("p").get_text().strip()
|
|
||||||
|
# This line below occasionally throws this error: AttributeError: 'NoneType' object has no attribute 'get_text'
|
||||||
|
#desc = agent.find("p").get_text().strip()
|
||||||
|
|
||||||
|
# Attempting a different way to handle to avoid errors:
|
||||||
|
p_tag = agent.find("p")
|
||||||
|
if p_tag is not None:
|
||||||
|
desc = p_tag.get_text().strip()
|
||||||
|
else:
|
||||||
|
desc = "Description unavailable from darkvisitors.com"
|
||||||
|
|
||||||
default_values = {
|
default_values = {
|
||||||
"Unclear at this time.",
|
"Unclear at this time.",
|
||||||
|
|
@ -169,13 +178,13 @@ def json_to_htaccess(robot_json):
|
||||||
# User agents that contain any of the blocked values.
|
# User agents that contain any of the blocked values.
|
||||||
htaccess = "RewriteEngine On\n"
|
htaccess = "RewriteEngine On\n"
|
||||||
htaccess += f"RewriteCond %{{HTTP_USER_AGENT}} {list_to_pcre(robot_json.keys())} [NC]\n"
|
htaccess += f"RewriteCond %{{HTTP_USER_AGENT}} {list_to_pcre(robot_json.keys())} [NC]\n"
|
||||||
htaccess += "RewriteRule !^/?robots\\.txt$ - [F,L]\n"
|
htaccess += "RewriteRule !^/?robots\\.txt$ - [F]\n"
|
||||||
return htaccess
|
return htaccess
|
||||||
|
|
||||||
def json_to_nginx(robot_json):
|
def json_to_nginx(robot_json):
|
||||||
# Creates an Nginx config file. This config snippet can be included in
|
# Creates an Nginx config file. This config snippet can be included in
|
||||||
# nginx server{} blocks to block AI bots.
|
# nginx server{} blocks to block AI bots.
|
||||||
config = f"if ($http_user_agent ~* \"{list_to_pcre(robot_json.keys())}\") {{\n return 403;\n}}"
|
config = f"set $block 0;\n\nif ($http_user_agent ~* \"{list_to_pcre(robot_json.keys())}\") {{\n set $block 1;\n}}\n\nif ($request_uri = \"/robots.txt\") {{\n set $block 0;\n}}\n\nif ($block) {{\n return 403;\n}}"
|
||||||
return config
|
return config
|
||||||
|
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,3 +1,3 @@
|
||||||
RewriteEngine On
|
RewriteEngine On
|
||||||
RewriteCond %{HTTP_USER_AGENT} (AI2Bot|Ai2Bot\-Dolma|Amazonbot|anthropic\-ai|Applebot|Applebot\-Extended|Bytespider|CCBot|ChatGPT\-User|Claude\-Web|ClaudeBot|cohere\-ai|Diffbot|FacebookBot|facebookexternalhit|FriendlyCrawler|Google\-Extended|GoogleOther|GoogleOther\-Image|GoogleOther\-Video|GPTBot|iaskspider/2\.0|ICC\-Crawler|ImagesiftBot|img2dataset|ISSCyberRiskCrawler|Kangaroo\ Bot|Meta\-ExternalAgent|Meta\-ExternalFetcher|OAI\-SearchBot|omgili|omgilibot|Perplexity\-User|PerplexityBot|PetalBot|Scrapy|Sidetrade\ indexer\ bot|Timpibot|VelenPublicWebCrawler|Webzio\-Extended|YouBot|crawler\.with\.dots|star\*\*\*crawler|Is\ this\ a\ crawler\?|a\[mazing\]\{42\}\(robot\)|2\^32\$|curl\|sudo\ bash) [NC]
|
RewriteCond %{HTTP_USER_AGENT} (AI2Bot|Ai2Bot\-Dolma|Amazonbot|anthropic\-ai|Applebot|Applebot\-Extended|Bytespider|CCBot|ChatGPT\-User|Claude\-Web|ClaudeBot|cohere\-ai|Diffbot|FacebookBot|facebookexternalhit|FriendlyCrawler|Google\-Extended|GoogleOther|GoogleOther\-Image|GoogleOther\-Video|GPTBot|iaskspider/2\.0|ICC\-Crawler|ImagesiftBot|img2dataset|ISSCyberRiskCrawler|Kangaroo\ Bot|Meta\-ExternalAgent|Meta\-ExternalFetcher|OAI\-SearchBot|omgili|omgilibot|Perplexity\-User|PerplexityBot|PetalBot|Scrapy|Sidetrade\ indexer\ bot|Timpibot|VelenPublicWebCrawler|Webzio\-Extended|YouBot|crawler\.with\.dots|star\*\*\*crawler|Is\ this\ a\ crawler\?|a\[mazing\]\{42\}\(robot\)|2\^32\$|curl\|sudo\ bash) [NC]
|
||||||
RewriteRule !^/?robots\.txt$ - [F,L]
|
RewriteRule !^/?robots\.txt$ - [F]
|
||||||
|
|
|
||||||
|
|
@ -1,3 +1,13 @@
|
||||||
|
set $block 0;
|
||||||
|
|
||||||
if ($http_user_agent ~* "(AI2Bot|Ai2Bot\-Dolma|Amazonbot|anthropic\-ai|Applebot|Applebot\-Extended|Bytespider|CCBot|ChatGPT\-User|Claude\-Web|ClaudeBot|cohere\-ai|Diffbot|FacebookBot|facebookexternalhit|FriendlyCrawler|Google\-Extended|GoogleOther|GoogleOther\-Image|GoogleOther\-Video|GPTBot|iaskspider/2\.0|ICC\-Crawler|ImagesiftBot|img2dataset|ISSCyberRiskCrawler|Kangaroo\ Bot|Meta\-ExternalAgent|Meta\-ExternalFetcher|OAI\-SearchBot|omgili|omgilibot|Perplexity\-User|PerplexityBot|PetalBot|Scrapy|Sidetrade\ indexer\ bot|Timpibot|VelenPublicWebCrawler|Webzio\-Extended|YouBot|crawler\.with\.dots|star\*\*\*crawler|Is\ this\ a\ crawler\?|a\[mazing\]\{42\}\(robot\)|2\^32\$|curl\|sudo\ bash)") {
|
if ($http_user_agent ~* "(AI2Bot|Ai2Bot\-Dolma|Amazonbot|anthropic\-ai|Applebot|Applebot\-Extended|Bytespider|CCBot|ChatGPT\-User|Claude\-Web|ClaudeBot|cohere\-ai|Diffbot|FacebookBot|facebookexternalhit|FriendlyCrawler|Google\-Extended|GoogleOther|GoogleOther\-Image|GoogleOther\-Video|GPTBot|iaskspider/2\.0|ICC\-Crawler|ImagesiftBot|img2dataset|ISSCyberRiskCrawler|Kangaroo\ Bot|Meta\-ExternalAgent|Meta\-ExternalFetcher|OAI\-SearchBot|omgili|omgilibot|Perplexity\-User|PerplexityBot|PetalBot|Scrapy|Sidetrade\ indexer\ bot|Timpibot|VelenPublicWebCrawler|Webzio\-Extended|YouBot|crawler\.with\.dots|star\*\*\*crawler|Is\ this\ a\ crawler\?|a\[mazing\]\{42\}\(robot\)|2\^32\$|curl\|sudo\ bash)") {
|
||||||
|
set $block 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if ($request_uri = "/robots.txt") {
|
||||||
|
set $block 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
if ($block) {
|
||||||
return 403;
|
return 403;
|
||||||
}
|
}
|
||||||
36
docs/traefik-manual-setup.md
Normal file
36
docs/traefik-manual-setup.md
Normal file
|
|
@ -0,0 +1,36 @@
|
||||||
|
# Intro
|
||||||
|
If you're using Traefik as your reverse proxy in your docker setup, you might want to use it as well to centrally serve the ```/robots.txt``` for all your Traefik fronted services.
|
||||||
|
|
||||||
|
This can be achieved by configuring a single lightweight service to service static files and defining a high priority Traefik HTTP Router rule.
|
||||||
|
|
||||||
|
# Setup
|
||||||
|
Define a single service to serve the one robots.txt to rule them all. I'm using a lean nginx:alpine docker image in this example:
|
||||||
|
|
||||||
|
```
|
||||||
|
services:
|
||||||
|
robots:
|
||||||
|
image: nginx:alpine
|
||||||
|
container_name: robots-server
|
||||||
|
volumes:
|
||||||
|
- ./static/:/usr/share/nginx/html/:ro
|
||||||
|
labels:
|
||||||
|
- "traefik.enable=true"
|
||||||
|
# Router for all /robots.txt requests
|
||||||
|
- "traefik.http.routers.robots.rule=Path(`/robots.txt`)"
|
||||||
|
- "traefik.http.routers.robots.entrypoints=web,websecure"
|
||||||
|
- "traefik.http.routers.robots.priority=3000"
|
||||||
|
- "traefik.http.routers.robots.service=robots"
|
||||||
|
- "traefik.http.routers.robots.tls.certresolver=letsencrypt"
|
||||||
|
- "traefik.http.services.robots.loadbalancer.server.port=80"
|
||||||
|
networks:
|
||||||
|
- external_network
|
||||||
|
|
||||||
|
networks:
|
||||||
|
external_network:
|
||||||
|
name: traefik_external_network
|
||||||
|
external: true
|
||||||
|
```
|
||||||
|
|
||||||
|
The Traefik HTTP Routers rule explicitly does not contain a Hostname. Traefik will print a warning about this for the TLS setup but it will work. The high priority of 3000 should ensure this rule is evaluated first for incoming requests.
|
||||||
|
|
||||||
|
Place your robots.txt in the local `./static/` directory and NGINX will serve it for all services behind your Traefik proxy.
|
||||||
|
|
@ -1,52 +1,96 @@
|
||||||
|
AddSearchBot
|
||||||
AI2Bot
|
AI2Bot
|
||||||
|
AI2Bot-DeepResearchEval
|
||||||
Ai2Bot-Dolma
|
Ai2Bot-Dolma
|
||||||
aiHitBot
|
aiHitBot
|
||||||
|
amazon-kendra
|
||||||
Amazonbot
|
Amazonbot
|
||||||
|
AmazonBuyForMe
|
||||||
Andibot
|
Andibot
|
||||||
|
Anomura
|
||||||
anthropic-ai
|
anthropic-ai
|
||||||
Applebot
|
Applebot
|
||||||
Applebot-Extended
|
Applebot-Extended
|
||||||
|
atlassian-bot
|
||||||
|
Awario
|
||||||
bedrockbot
|
bedrockbot
|
||||||
|
bigsur.ai
|
||||||
|
Bravebot
|
||||||
Brightbot 1.0
|
Brightbot 1.0
|
||||||
|
BuddyBot
|
||||||
Bytespider
|
Bytespider
|
||||||
CCBot
|
CCBot
|
||||||
|
Channel3Bot
|
||||||
|
ChatGLM-Spider
|
||||||
|
ChatGPT Agent
|
||||||
ChatGPT-User
|
ChatGPT-User
|
||||||
Claude-SearchBot
|
Claude-SearchBot
|
||||||
Claude-User
|
Claude-User
|
||||||
Claude-Web
|
Claude-Web
|
||||||
ClaudeBot
|
ClaudeBot
|
||||||
|
Cloudflare-AutoRAG
|
||||||
|
CloudVertexBot
|
||||||
cohere-ai
|
cohere-ai
|
||||||
cohere-training-data-crawler
|
cohere-training-data-crawler
|
||||||
Cotoyogi
|
Cotoyogi
|
||||||
|
Crawl4AI
|
||||||
Crawlspace
|
Crawlspace
|
||||||
|
Datenbank Crawler
|
||||||
|
DeepSeekBot
|
||||||
|
Devin
|
||||||
Diffbot
|
Diffbot
|
||||||
DuckAssistBot
|
DuckAssistBot
|
||||||
|
Echobot Bot
|
||||||
EchoboxBot
|
EchoboxBot
|
||||||
FacebookBot
|
FacebookBot
|
||||||
|
facebookexternalhit
|
||||||
Factset_spyderbot
|
Factset_spyderbot
|
||||||
FirecrawlAgent
|
FirecrawlAgent
|
||||||
FriendlyCrawler
|
FriendlyCrawler
|
||||||
|
Gemini-Deep-Research
|
||||||
Google-CloudVertexBot
|
Google-CloudVertexBot
|
||||||
Google-Extended
|
Google-Extended
|
||||||
|
Google-Firebase
|
||||||
|
Google-NotebookLM
|
||||||
|
GoogleAgent-Mariner
|
||||||
GoogleOther
|
GoogleOther
|
||||||
GoogleOther-Image
|
GoogleOther-Image
|
||||||
GoogleOther-Video
|
GoogleOther-Video
|
||||||
GPTBot
|
GPTBot
|
||||||
|
iAskBot
|
||||||
|
iaskspider
|
||||||
iaskspider/2.0
|
iaskspider/2.0
|
||||||
|
IbouBot
|
||||||
ICC-Crawler
|
ICC-Crawler
|
||||||
ImagesiftBot
|
ImagesiftBot
|
||||||
|
imageSpider
|
||||||
img2dataset
|
img2dataset
|
||||||
ISSCyberRiskCrawler
|
ISSCyberRiskCrawler
|
||||||
Kangaroo Bot
|
Kangaroo Bot
|
||||||
|
KlaviyoAIBot
|
||||||
|
KunatoCrawler
|
||||||
|
laion-huggingface-processor
|
||||||
|
LAIONDownloader
|
||||||
|
LCC
|
||||||
|
LinerBot
|
||||||
|
Linguee Bot
|
||||||
|
LinkupBot
|
||||||
|
Manus-User
|
||||||
meta-externalagent
|
meta-externalagent
|
||||||
Meta-ExternalAgent
|
Meta-ExternalAgent
|
||||||
meta-externalfetcher
|
meta-externalfetcher
|
||||||
Meta-ExternalFetcher
|
Meta-ExternalFetcher
|
||||||
|
meta-webindexer
|
||||||
|
MistralAI-User
|
||||||
MistralAI-User/1.0
|
MistralAI-User/1.0
|
||||||
|
MyCentralAIScraperBot
|
||||||
|
netEstate Imprint Crawler
|
||||||
|
NotebookLM
|
||||||
NovaAct
|
NovaAct
|
||||||
OAI-SearchBot
|
OAI-SearchBot
|
||||||
omgili
|
omgili
|
||||||
omgilibot
|
omgilibot
|
||||||
|
OpenAI
|
||||||
Operator
|
Operator
|
||||||
PanguBot
|
PanguBot
|
||||||
Panscient
|
Panscient
|
||||||
|
|
@ -55,6 +99,8 @@ Perplexity-User
|
||||||
PerplexityBot
|
PerplexityBot
|
||||||
PetalBot
|
PetalBot
|
||||||
PhindBot
|
PhindBot
|
||||||
|
Poggio-Citations
|
||||||
|
Poseidon Research Crawler
|
||||||
QualifiedBot
|
QualifiedBot
|
||||||
QuillBot
|
QuillBot
|
||||||
quillbot.com
|
quillbot.com
|
||||||
|
|
@ -62,12 +108,23 @@ SBIntuitionsBot
|
||||||
Scrapy
|
Scrapy
|
||||||
SemrushBot-OCOB
|
SemrushBot-OCOB
|
||||||
SemrushBot-SWA
|
SemrushBot-SWA
|
||||||
|
ShapBot
|
||||||
Sidetrade indexer bot
|
Sidetrade indexer bot
|
||||||
|
Spider
|
||||||
|
TavilyBot
|
||||||
|
TerraCotta
|
||||||
|
Thinkbot
|
||||||
TikTokSpider
|
TikTokSpider
|
||||||
Timpibot
|
Timpibot
|
||||||
|
TwinAgent
|
||||||
VelenPublicWebCrawler
|
VelenPublicWebCrawler
|
||||||
|
WARDBot
|
||||||
Webzio-Extended
|
Webzio-Extended
|
||||||
|
webzio-extended
|
||||||
wpbot
|
wpbot
|
||||||
|
WRTNBot
|
||||||
|
YaK
|
||||||
YandexAdditional
|
YandexAdditional
|
||||||
YandexAdditionalBot
|
YandexAdditionalBot
|
||||||
YouBot
|
YouBot
|
||||||
|
ZanistaBot
|
||||||
|
|
@ -1,3 +1,13 @@
|
||||||
if ($http_user_agent ~* "(AI2Bot|Ai2Bot\-Dolma|aiHitBot|Amazonbot|Andibot|anthropic\-ai|Applebot|Applebot\-Extended|bedrockbot|Brightbot\ 1\.0|Bytespider|CCBot|ChatGPT\-User|Claude\-SearchBot|Claude\-User|Claude\-Web|ClaudeBot|cohere\-ai|cohere\-training\-data\-crawler|Cotoyogi|Crawlspace|Diffbot|DuckAssistBot|EchoboxBot|FacebookBot|Factset_spyderbot|FirecrawlAgent|FriendlyCrawler|Google\-CloudVertexBot|Google\-Extended|GoogleOther|GoogleOther\-Image|GoogleOther\-Video|GPTBot|iaskspider/2\.0|ICC\-Crawler|ImagesiftBot|img2dataset|ISSCyberRiskCrawler|Kangaroo\ Bot|meta\-externalagent|Meta\-ExternalAgent|meta\-externalfetcher|Meta\-ExternalFetcher|MistralAI\-User/1\.0|NovaAct|OAI\-SearchBot|omgili|omgilibot|Operator|PanguBot|Panscient|panscient\.com|Perplexity\-User|PerplexityBot|PetalBot|PhindBot|QualifiedBot|QuillBot|quillbot\.com|SBIntuitionsBot|Scrapy|SemrushBot\-OCOB|SemrushBot\-SWA|Sidetrade\ indexer\ bot|TikTokSpider|Timpibot|VelenPublicWebCrawler|Webzio\-Extended|wpbot|YandexAdditional|YandexAdditionalBot|YouBot)") {
|
set $block 0;
|
||||||
|
|
||||||
|
if ($http_user_agent ~* "(AddSearchBot|AI2Bot|AI2Bot\-DeepResearchEval|Ai2Bot\-Dolma|aiHitBot|amazon\-kendra|Amazonbot|AmazonBuyForMe|Andibot|Anomura|anthropic\-ai|Applebot|Applebot\-Extended|atlassian\-bot|Awario|bedrockbot|bigsur\.ai|Bravebot|Brightbot\ 1\.0|BuddyBot|Bytespider|CCBot|Channel3Bot|ChatGLM\-Spider|ChatGPT\ Agent|ChatGPT\-User|Claude\-SearchBot|Claude\-User|Claude\-Web|ClaudeBot|Cloudflare\-AutoRAG|CloudVertexBot|cohere\-ai|cohere\-training\-data\-crawler|Cotoyogi|Crawl4AI|Crawlspace|Datenbank\ Crawler|DeepSeekBot|Devin|Diffbot|DuckAssistBot|Echobot\ Bot|EchoboxBot|FacebookBot|facebookexternalhit|Factset_spyderbot|FirecrawlAgent|FriendlyCrawler|Gemini\-Deep\-Research|Google\-CloudVertexBot|Google\-Extended|Google\-Firebase|Google\-NotebookLM|GoogleAgent\-Mariner|GoogleOther|GoogleOther\-Image|GoogleOther\-Video|GPTBot|iAskBot|iaskspider|iaskspider/2\.0|IbouBot|ICC\-Crawler|ImagesiftBot|imageSpider|img2dataset|ISSCyberRiskCrawler|Kangaroo\ Bot|KlaviyoAIBot|KunatoCrawler|laion\-huggingface\-processor|LAIONDownloader|LCC|LinerBot|Linguee\ Bot|LinkupBot|Manus\-User|meta\-externalagent|Meta\-ExternalAgent|meta\-externalfetcher|Meta\-ExternalFetcher|meta\-webindexer|MistralAI\-User|MistralAI\-User/1\.0|MyCentralAIScraperBot|netEstate\ Imprint\ Crawler|NotebookLM|NovaAct|OAI\-SearchBot|omgili|omgilibot|OpenAI|Operator|PanguBot|Panscient|panscient\.com|Perplexity\-User|PerplexityBot|PetalBot|PhindBot|Poggio\-Citations|Poseidon\ Research\ Crawler|QualifiedBot|QuillBot|quillbot\.com|SBIntuitionsBot|Scrapy|SemrushBot\-OCOB|SemrushBot\-SWA|ShapBot|Sidetrade\ indexer\ bot|Spider|TavilyBot|TerraCotta|Thinkbot|TikTokSpider|Timpibot|TwinAgent|VelenPublicWebCrawler|WARDBot|Webzio\-Extended|webzio\-extended|wpbot|WRTNBot|YaK|YandexAdditional|YandexAdditionalBot|YouBot|ZanistaBot)") {
|
||||||
|
set $block 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if ($request_uri = "/robots.txt") {
|
||||||
|
set $block 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
if ($block) {
|
||||||
return 403;
|
return 403;
|
||||||
}
|
}
|
||||||
3
requirements.txt
Normal file
3
requirements.txt
Normal file
|
|
@ -0,0 +1,3 @@
|
||||||
|
beautifulsoup4
|
||||||
|
lxml
|
||||||
|
requests
|
||||||
475
robots.json
475
robots.json
|
|
@ -1,4 +1,11 @@
|
||||||
{
|
{
|
||||||
|
"AddSearchBot": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Search Crawlers",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "AddSearchBot is a web crawler that indexes website content for AddSearch's AI-powered site search solution, collecting data to provide fast and accurate search results. More info can be found at https://darkvisitors.com/agents/agents/addsearchbot"
|
||||||
|
},
|
||||||
"AI2Bot": {
|
"AI2Bot": {
|
||||||
"operator": "[Ai2](https://allenai.org/crawler)",
|
"operator": "[Ai2](https://allenai.org/crawler)",
|
||||||
"respect": "Yes",
|
"respect": "Yes",
|
||||||
|
|
@ -6,12 +13,19 @@
|
||||||
"frequency": "No information provided.",
|
"frequency": "No information provided.",
|
||||||
"description": "Explores 'certain domains' to find web content."
|
"description": "Explores 'certain domains' to find web content."
|
||||||
},
|
},
|
||||||
|
"AI2Bot-DeepResearchEval": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Assistants",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/ai2bot-deepresearcheval"
|
||||||
|
},
|
||||||
"Ai2Bot-Dolma": {
|
"Ai2Bot-Dolma": {
|
||||||
"description": "Explores 'certain domains' to find web content.",
|
|
||||||
"frequency": "No information provided.",
|
|
||||||
"function": "Content is used to train open language models.",
|
|
||||||
"operator": "[Ai2](https://allenai.org/crawler)",
|
"operator": "[Ai2](https://allenai.org/crawler)",
|
||||||
"respect": "Yes"
|
"respect": "Yes",
|
||||||
|
"function": "Content is used to train open language models.",
|
||||||
|
"frequency": "No information provided.",
|
||||||
|
"description": "Explores 'certain domains' to find web content."
|
||||||
},
|
},
|
||||||
"aiHitBot": {
|
"aiHitBot": {
|
||||||
"operator": "[aiHit](https://www.aihitdata.com/about)",
|
"operator": "[aiHit](https://www.aihitdata.com/about)",
|
||||||
|
|
@ -20,6 +34,13 @@
|
||||||
"frequency": "No information provided.",
|
"frequency": "No information provided.",
|
||||||
"description": "Scrapes data for AI systems."
|
"description": "Scrapes data for AI systems."
|
||||||
},
|
},
|
||||||
|
"amazon-kendra": {
|
||||||
|
"operator": "Amazon",
|
||||||
|
"respect": "Yes",
|
||||||
|
"function": "Collects data for AI natural language search",
|
||||||
|
"frequency": "No information provided.",
|
||||||
|
"description": "Amazon Kendra is a highly accurate intelligent search service that enables your users to search unstructured data using natural language. It returns specific answers to questions, giving users an experience that's close to interacting with a human expert. It is highly scalable and capable of meeting performance demands, tightly integrated with other AWS services such as Amazon S3 and Amazon Lex, and offers enterprise-grade security."
|
||||||
|
},
|
||||||
"Amazonbot": {
|
"Amazonbot": {
|
||||||
"operator": "Amazon",
|
"operator": "Amazon",
|
||||||
"respect": "Yes",
|
"respect": "Yes",
|
||||||
|
|
@ -27,6 +48,13 @@
|
||||||
"frequency": "No information provided.",
|
"frequency": "No information provided.",
|
||||||
"description": "Includes references to crawled website when surfacing answers via Alexa; does not clearly outline other uses."
|
"description": "Includes references to crawled website when surfacing answers via Alexa; does not clearly outline other uses."
|
||||||
},
|
},
|
||||||
|
"AmazonBuyForMe": {
|
||||||
|
"operator": "[Amazon](https://amazon.com)",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Agents",
|
||||||
|
"frequency": "No information provided.",
|
||||||
|
"description": "Buy For Me is an AI agent that helps buy products at the direction of customers."
|
||||||
|
},
|
||||||
"Andibot": {
|
"Andibot": {
|
||||||
"operator": "[Andi](https://andisearch.com/)",
|
"operator": "[Andi](https://andisearch.com/)",
|
||||||
"respect": "Unclear at this time",
|
"respect": "Unclear at this time",
|
||||||
|
|
@ -34,6 +62,13 @@
|
||||||
"frequency": "No information provided.",
|
"frequency": "No information provided.",
|
||||||
"description": "Scrapes website and provides AI summary."
|
"description": "Scrapes website and provides AI summary."
|
||||||
},
|
},
|
||||||
|
"Anomura": {
|
||||||
|
"operator": "[Direqt](https://direqt.ai)",
|
||||||
|
"respect": "Yes",
|
||||||
|
"function": "Collects data for AI search",
|
||||||
|
"frequency": "No information provided.",
|
||||||
|
"description": "Anomura is Direqt's search crawler, it discovers and indexes pages their customers websites."
|
||||||
|
},
|
||||||
"anthropic-ai": {
|
"anthropic-ai": {
|
||||||
"operator": "[Anthropic](https://www.anthropic.com)",
|
"operator": "[Anthropic](https://www.anthropic.com)",
|
||||||
"respect": "Unclear at this time.",
|
"respect": "Unclear at this time.",
|
||||||
|
|
@ -55,6 +90,20 @@
|
||||||
"frequency": "Unclear at this time.",
|
"frequency": "Unclear at this time.",
|
||||||
"description": "Apple has a secondary user agent, Applebot-Extended ... [that is] used to train Apple's foundation models powering generative AI features across Apple products, including Apple Intelligence, Services, and Developer Tools."
|
"description": "Apple has a secondary user agent, Applebot-Extended ... [that is] used to train Apple's foundation models powering generative AI features across Apple products, including Apple Intelligence, Services, and Developer Tools."
|
||||||
},
|
},
|
||||||
|
"atlassian-bot": {
|
||||||
|
"operator": "[Atlassian](https://www.atlassian.com)",
|
||||||
|
"respect": "[Yes](https://support.atlassian.com/organization-administration/docs/connect-custom-website-to-rovo/#Editing-your-robots.txt)",
|
||||||
|
"function": "AI search, assistants and agents",
|
||||||
|
"frequency": "No information provided.",
|
||||||
|
"description": "atlassian-bot is a web crawler used to index website content for its AI search, assistants and agents available in its Rovo GenAI product."
|
||||||
|
},
|
||||||
|
"Awario": {
|
||||||
|
"operator": "Awario",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Data Scrapers",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Awario is an AI data scraper operated by Awario. It's not currently known to be artificially intelligent or AI-related. If you think that's incorrect or can provide more detail about its purpose, please contact us. More info can be found at https://darkvisitors.com/agents/agents/awario"
|
||||||
|
},
|
||||||
"bedrockbot": {
|
"bedrockbot": {
|
||||||
"operator": "[Amazon](https://amazon.com)",
|
"operator": "[Amazon](https://amazon.com)",
|
||||||
"respect": "[Yes](https://docs.aws.amazon.com/bedrock/latest/userguide/webcrawl-data-source-connector.html#configuration-webcrawl-connector)",
|
"respect": "[Yes](https://docs.aws.amazon.com/bedrock/latest/userguide/webcrawl-data-source-connector.html#configuration-webcrawl-connector)",
|
||||||
|
|
@ -62,12 +111,33 @@
|
||||||
"frequency": "Unclear at this time.",
|
"frequency": "Unclear at this time.",
|
||||||
"description": "Connects to and crawls URLs that have been selected for use in a user's AWS bedrock application."
|
"description": "Connects to and crawls URLs that have been selected for use in a user's AWS bedrock application."
|
||||||
},
|
},
|
||||||
|
"bigsur.ai": {
|
||||||
|
"operator": "Big Sur AI that fetches website content to enable AI-powered web agents, sales assistants, and content marketing solutions for businesses",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Assistants",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "bigsur.ai is a web crawler operated by Big Sur AI that fetches website content to enable AI-powered web agents, sales assistants, and content marketing solutions for businesses. More info can be found at https://darkvisitors.com/agents/agents/bigsur-ai"
|
||||||
|
},
|
||||||
|
"Bravebot": {
|
||||||
|
"operator": "https://safe.search.brave.com/help/brave-search-crawler",
|
||||||
|
"respect": "Yes",
|
||||||
|
"function": "Collects data for AI search",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Brave search has a crawler to discover new pages and index their content."
|
||||||
|
},
|
||||||
"Brightbot 1.0": {
|
"Brightbot 1.0": {
|
||||||
"operator": "Browsing.ai",
|
"operator": "https://brightdata.com/brightbot",
|
||||||
"respect": "Unclear at this time.",
|
"respect": "Unclear at this time.",
|
||||||
"function": "LLM/AI training.",
|
"function": "LLM/AI training.",
|
||||||
|
"frequency": "At least one per minute.",
|
||||||
|
"description": "Scrapes data to train LLMs and AI products focused on website customer support, [uses residential IPs and legit-looking user-agents to disguise itself](https://ksol.io/en/blog/posts/brightbot-not-that-bright/)."
|
||||||
|
},
|
||||||
|
"BuddyBot": {
|
||||||
|
"operator": "[BuddyBotLearning](https://www.buddybotlearning.com)",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Learning Companion",
|
||||||
"frequency": "Unclear at this time.",
|
"frequency": "Unclear at this time.",
|
||||||
"description": "Scrapes data to train LLMs and AI products focused on website customer support."
|
"description": "BuddyBot is a voice-controlled AI learning companion targeted at childhooded STEM education."
|
||||||
},
|
},
|
||||||
"Bytespider": {
|
"Bytespider": {
|
||||||
"operator": "ByteDance",
|
"operator": "ByteDance",
|
||||||
|
|
@ -83,6 +153,27 @@
|
||||||
"frequency": "Monthly at present.",
|
"frequency": "Monthly at present.",
|
||||||
"description": "Web archive going back to 2008. [Cited in thousands of research papers per year](https://commoncrawl.org/research-papers)."
|
"description": "Web archive going back to 2008. [Cited in thousands of research papers per year](https://commoncrawl.org/research-papers)."
|
||||||
},
|
},
|
||||||
|
"Channel3Bot": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Search Crawlers",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/channel3bot"
|
||||||
|
},
|
||||||
|
"ChatGLM-Spider": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Data Scrapers",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/chatglm-spider"
|
||||||
|
},
|
||||||
|
"ChatGPT Agent": {
|
||||||
|
"operator": "[OpenAI](https://openai.com)",
|
||||||
|
"respect": "Yes",
|
||||||
|
"function": "AI Agents",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "ChatGPT Agent is an AI agent created by OpenAI that can use a web browser. It can intelligently navigate and interact with websites to complete multi-step tasks on behalf of a human user. More info can be found at https://darkvisitors.com/agents/agents/chatgpt-agent"
|
||||||
|
},
|
||||||
"ChatGPT-User": {
|
"ChatGPT-User": {
|
||||||
"operator": "[OpenAI](https://openai.com)",
|
"operator": "[OpenAI](https://openai.com)",
|
||||||
"respect": "Yes",
|
"respect": "Yes",
|
||||||
|
|
@ -118,6 +209,20 @@
|
||||||
"frequency": "No information provided.",
|
"frequency": "No information provided.",
|
||||||
"description": "Scrapes data to train LLMs and AI products offered by Anthropic."
|
"description": "Scrapes data to train LLMs and AI products offered by Anthropic."
|
||||||
},
|
},
|
||||||
|
"Cloudflare-AutoRAG": {
|
||||||
|
"operator": "[Cloudflare](https://developers.cloudflare.com/autorag)",
|
||||||
|
"respect": "Yes",
|
||||||
|
"function": "Collects data for AI search",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "AutoRAG is an all-in-one AI search solution."
|
||||||
|
},
|
||||||
|
"CloudVertexBot": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Data Scrapers",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "CloudVertexBot is a Google-operated crawler available to site owners to request targeted crawls of their own sites for AI training purposes on the Vertex AI platform. More info can be found at https://darkvisitors.com/agents/agents/cloudvertexbot"
|
||||||
|
},
|
||||||
"cohere-ai": {
|
"cohere-ai": {
|
||||||
"operator": "[Cohere](https://cohere.com)",
|
"operator": "[Cohere](https://cohere.com)",
|
||||||
"respect": "Unclear at this time.",
|
"respect": "Unclear at this time.",
|
||||||
|
|
@ -139,6 +244,13 @@
|
||||||
"frequency": "No information provided.",
|
"frequency": "No information provided.",
|
||||||
"description": "Scrapes data for AI training in Japanese language."
|
"description": "Scrapes data for AI training in Japanese language."
|
||||||
},
|
},
|
||||||
|
"Crawl4AI": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "Undocumented AI Agents",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/crawl4ai"
|
||||||
|
},
|
||||||
"Crawlspace": {
|
"Crawlspace": {
|
||||||
"operator": "[Crawlspace](https://crawlspace.dev)",
|
"operator": "[Crawlspace](https://crawlspace.dev)",
|
||||||
"respect": "[Yes](https://news.ycombinator.com/item?id=42756654)",
|
"respect": "[Yes](https://news.ycombinator.com/item?id=42756654)",
|
||||||
|
|
@ -146,6 +258,27 @@
|
||||||
"frequency": "Unclear at this time.",
|
"frequency": "Unclear at this time.",
|
||||||
"description": "Provides crawling services for any purpose, probably including AI model training."
|
"description": "Provides crawling services for any purpose, probably including AI model training."
|
||||||
},
|
},
|
||||||
|
"Datenbank Crawler": {
|
||||||
|
"operator": "Datenbank",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Data Scrapers",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Datenbank Crawler is an AI data scraper operated by Datenbank. It's not currently known to be artificially intelligent or AI-related. If you think that's incorrect or can provide more detail about its purpose, please contact us. More info can be found at https://darkvisitors.com/agents/agents/datenbank-crawler"
|
||||||
|
},
|
||||||
|
"DeepSeekBot": {
|
||||||
|
"operator": "DeepSeek",
|
||||||
|
"respect": "No",
|
||||||
|
"function": "Training language models and improving AI products",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "DeepSeekBot is a web crawler used by DeepSeek to train its language models and improve its AI products."
|
||||||
|
},
|
||||||
|
"Devin": {
|
||||||
|
"operator": "Devin AI",
|
||||||
|
"respect": "Yes",
|
||||||
|
"function": "AI Assistants",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Devin is a collaborative AI teammate built to help ambitious engineering teams achieve more."
|
||||||
|
},
|
||||||
"Diffbot": {
|
"Diffbot": {
|
||||||
"operator": "[Diffbot](https://www.diffbot.com/)",
|
"operator": "[Diffbot](https://www.diffbot.com/)",
|
||||||
"respect": "At the discretion of Diffbot users.",
|
"respect": "At the discretion of Diffbot users.",
|
||||||
|
|
@ -160,6 +293,13 @@
|
||||||
"frequency": "Unclear at this time.",
|
"frequency": "Unclear at this time.",
|
||||||
"description": "DuckAssistBot is used by DuckDuckGo's DuckAssist feature to fetch content and generate realtime AI answers to user searches. More info can be found at https://darkvisitors.com/agents/agents/duckassistbot"
|
"description": "DuckAssistBot is used by DuckDuckGo's DuckAssist feature to fetch content and generate realtime AI answers to user searches. More info can be found at https://darkvisitors.com/agents/agents/duckassistbot"
|
||||||
},
|
},
|
||||||
|
"Echobot Bot": {
|
||||||
|
"operator": "Echobox",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Data Scrapers",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Echobot Bot is an AI data scraper operated by Echobox. It's not currently known to be artificially intelligent or AI-related. If you think that's incorrect or can provide more detail about its purpose, please contact us. More info can be found at https://darkvisitors.com/agents/agents/echobot-bot"
|
||||||
|
},
|
||||||
"EchoboxBot": {
|
"EchoboxBot": {
|
||||||
"operator": "[Echobox](https://echobox.com)",
|
"operator": "[Echobox](https://echobox.com)",
|
||||||
"respect": "Unclear at this time.",
|
"respect": "Unclear at this time.",
|
||||||
|
|
@ -174,6 +314,13 @@
|
||||||
"frequency": "Up to 1 page per second",
|
"frequency": "Up to 1 page per second",
|
||||||
"description": "Officially used for training Meta \"speech recognition technology,\" unknown if used to train Meta AI specifically."
|
"description": "Officially used for training Meta \"speech recognition technology,\" unknown if used to train Meta AI specifically."
|
||||||
},
|
},
|
||||||
|
"facebookexternalhit": {
|
||||||
|
"operator": "Meta/Facebook",
|
||||||
|
"respect": "[No](https://github.com/ai-robots-txt/ai.robots.txt/issues/40#issuecomment-2524591313)",
|
||||||
|
"function": "Ostensibly only for sharing, but likely used as an AI crawler as well",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Note that excluding FacebookExternalHit will block incorporating OpenGraph data when sharing in social media, including rich links in Apple's Messages app. [According to Meta](https://developers.facebook.com/docs/sharing/webmasters/web-crawlers/), its purpose is \"to crawl the content of an app or website that was shared on one of Meta\u2019s family of apps\u2026\". However, see discussions [here](https://github.com/ai-robots-txt/ai.robots.txt/pull/21) and [here](https://github.com/ai-robots-txt/ai.robots.txt/issues/40#issuecomment-2524591313) for evidence to the contrary."
|
||||||
|
},
|
||||||
"Factset_spyderbot": {
|
"Factset_spyderbot": {
|
||||||
"operator": "[Factset](https://www.factset.com/ai)",
|
"operator": "[Factset](https://www.factset.com/ai)",
|
||||||
"respect": "Unclear at this time.",
|
"respect": "Unclear at this time.",
|
||||||
|
|
@ -195,6 +342,13 @@
|
||||||
"operator": "Unknown",
|
"operator": "Unknown",
|
||||||
"respect": "[Yes](https://imho.alex-kunz.com/2024/01/25/an-update-on-friendly-crawler)"
|
"respect": "[Yes](https://imho.alex-kunz.com/2024/01/25/an-update-on-friendly-crawler)"
|
||||||
},
|
},
|
||||||
|
"Gemini-Deep-Research": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Assistants",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Gemini-Deep-Research is the agent responsible for collecting and scanning resources used in Google Gemini's Deep Research feature, which acts as a personal research assistant. More info can be found at https://darkvisitors.com/agents/agents/gemini-deep-research"
|
||||||
|
},
|
||||||
"Google-CloudVertexBot": {
|
"Google-CloudVertexBot": {
|
||||||
"operator": "Google",
|
"operator": "Google",
|
||||||
"respect": "[Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers)",
|
"respect": "[Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers)",
|
||||||
|
|
@ -209,12 +363,33 @@
|
||||||
"frequency": "No information.",
|
"frequency": "No information.",
|
||||||
"description": "Used to train Gemini and Vertex AI generative APIs. Does not impact a site's inclusion or ranking in Google Search."
|
"description": "Used to train Gemini and Vertex AI generative APIs. Does not impact a site's inclusion or ranking in Google Search."
|
||||||
},
|
},
|
||||||
"GoogleOther": {
|
"Google-Firebase": {
|
||||||
"description": "\"Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development.\"",
|
|
||||||
"frequency": "No information.",
|
|
||||||
"function": "Scrapes data.",
|
|
||||||
"operator": "Google",
|
"operator": "Google",
|
||||||
"respect": "[Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers)"
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "Used as part of AI apps developed by users of Google's Firebase AI products.",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Supports Google's Firebase AI products."
|
||||||
|
},
|
||||||
|
"Google-NotebookLM": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Assistants",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Google-NotebookLM is an AI-powered research and note-taking assistant that helps users synthesize information from their own uploaded sources, such as documents, transcripts, or web content. It can generate summaries, answer questions, and highlight key themes from the materials you provide, acting like a personalized research companion built on Google's Gemini model. Google-NotebookLM fetches source URLs when users add them to their notebooks, enabling the AI to access and analyze those pages for context and insights. More info can be found at https://darkvisitors.com/agents/agents/google-notebooklm"
|
||||||
|
},
|
||||||
|
"GoogleAgent-Mariner": {
|
||||||
|
"operator": "Google",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Agents",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "GoogleAgent-Mariner is an AI agent created by Google that can use a web browser. It can intelligently navigate and interact with websites to complete multi-step tasks on behalf of a human user. More info can be found at https://darkvisitors.com/agents/agents/googleagent-mariner"
|
||||||
|
},
|
||||||
|
"GoogleOther": {
|
||||||
|
"operator": "Google",
|
||||||
|
"respect": "[Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers)",
|
||||||
|
"function": "Scrapes data.",
|
||||||
|
"frequency": "No information.",
|
||||||
|
"description": "\"Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development.\""
|
||||||
},
|
},
|
||||||
"GoogleOther-Image": {
|
"GoogleOther-Image": {
|
||||||
"description": "\"Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development.\"",
|
"description": "\"Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development.\"",
|
||||||
|
|
@ -237,6 +412,20 @@
|
||||||
"frequency": "No information.",
|
"frequency": "No information.",
|
||||||
"description": "Data is used to train current and future models, removed paywalled data, PII and data that violates the company's policies."
|
"description": "Data is used to train current and future models, removed paywalled data, PII and data that violates the company's policies."
|
||||||
},
|
},
|
||||||
|
"iAskBot": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "Undocumented AI Agents",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/iaskbot"
|
||||||
|
},
|
||||||
|
"iaskspider": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "Undocumented AI Agents",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/iaskspider"
|
||||||
|
},
|
||||||
"iaskspider/2.0": {
|
"iaskspider/2.0": {
|
||||||
"description": "Used to provide answers to user queries.",
|
"description": "Used to provide answers to user queries.",
|
||||||
"frequency": "Unclear at this time.",
|
"frequency": "Unclear at this time.",
|
||||||
|
|
@ -244,20 +433,34 @@
|
||||||
"operator": "iAsk",
|
"operator": "iAsk",
|
||||||
"respect": "No"
|
"respect": "No"
|
||||||
},
|
},
|
||||||
|
"IbouBot": {
|
||||||
|
"operator": "Ibou",
|
||||||
|
"respect": "Yes",
|
||||||
|
"function": "Search result generation.",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Ibou.io operates a crawler service named IbouBot which fuels and updates their graph representation of the World Wide Web. This database and all the metrics are used to provide a search engine."
|
||||||
|
},
|
||||||
"ICC-Crawler": {
|
"ICC-Crawler": {
|
||||||
"description": "Use the collected data for artificial intelligence technologies; provide data to third parties, including commercial companies; those companies can use the data for their own business.",
|
|
||||||
"frequency": "No information.",
|
|
||||||
"function": "Scrapes data to train and support AI technologies.",
|
|
||||||
"operator": "[NICT](https://nict.go.jp)",
|
"operator": "[NICT](https://nict.go.jp)",
|
||||||
"respect": "Yes"
|
"respect": "Yes",
|
||||||
|
"function": "Scrapes data to train and support AI technologies.",
|
||||||
|
"frequency": "No information.",
|
||||||
|
"description": "Use the collected data for artificial intelligence technologies; provide data to third parties, including commercial companies; those companies can use the data for their own business."
|
||||||
},
|
},
|
||||||
"ImagesiftBot": {
|
"ImagesiftBot": {
|
||||||
"description": "Once images and text are downloaded from a webpage, ImageSift analyzes this data from the page and stores the information in an index. Our web intelligence products use this index to enable search and retrieval of similar images.",
|
"description": "Once images and text are downloaded from a webpage, ImageSift analyzes this data from the page and stores the information in an index. Their web intelligence products use this index to enable search and retrieval of similar images.",
|
||||||
"frequency": "No information.",
|
"frequency": "No information.",
|
||||||
"function": "ImageSiftBot is a web crawler that scrapes the internet for publicly available images to support our suite of web intelligence products",
|
"function": "ImageSiftBot is a web crawler that scrapes the internet for publicly available images to support their suite of web intelligence products",
|
||||||
"operator": "[ImageSift](https://imagesift.com)",
|
"operator": "[ImageSift](https://imagesift.com)",
|
||||||
"respect": "[Yes](https://imagesift.com/about)"
|
"respect": "[Yes](https://imagesift.com/about)"
|
||||||
},
|
},
|
||||||
|
"imageSpider": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Data Scrapers",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/imagespider"
|
||||||
|
},
|
||||||
"img2dataset": {
|
"img2dataset": {
|
||||||
"description": "Downloads large sets of images into datasets for LLM training or other purposes.",
|
"description": "Downloads large sets of images into datasets for LLM training or other purposes.",
|
||||||
"frequency": "At the discretion of img2dataset users.",
|
"frequency": "At the discretion of img2dataset users.",
|
||||||
|
|
@ -279,6 +482,69 @@
|
||||||
"frequency": "Unclear at this time.",
|
"frequency": "Unclear at this time.",
|
||||||
"description": "Kangaroo Bot is used by the company Kangaroo LLM to download data to train AI models tailored to Australian language and culture. More info can be found at https://darkvisitors.com/agents/agents/kangaroo-bot"
|
"description": "Kangaroo Bot is used by the company Kangaroo LLM to download data to train AI models tailored to Australian language and culture. More info can be found at https://darkvisitors.com/agents/agents/kangaroo-bot"
|
||||||
},
|
},
|
||||||
|
"KlaviyoAIBot": {
|
||||||
|
"operator": "[Klaviyo](https://www.klaviyo.com)",
|
||||||
|
"respect": "[Yes](https://help.klaviyo.com/hc/en-us/articles/40496146232219)",
|
||||||
|
"function": "AI Search Crawlers",
|
||||||
|
"frequency": "Indexes based on 'change signals' and user configuration.",
|
||||||
|
"description": "Indexes content to tailor AI experiences, generate content, answers and recommendations."
|
||||||
|
},
|
||||||
|
"KunatoCrawler": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "Undocumented AI Agents",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/kunatocrawler"
|
||||||
|
},
|
||||||
|
"laion-huggingface-processor": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Data Scrapers",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/laion-huggingface-processor"
|
||||||
|
},
|
||||||
|
"LAIONDownloader": {
|
||||||
|
"operator": "[Large-scale Artificial Intelligence Open Network](https://laion.ai/)",
|
||||||
|
"respect": "[No](https://laion.ai/faq/)",
|
||||||
|
"function": "AI tools and models for machine learning research.",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "LAIONDownloader is a bot by LAION, a non-profit organization that provides datasets, tools and models to liberate machine learning research."
|
||||||
|
},
|
||||||
|
"LCC": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Data Scrapers",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/lcc"
|
||||||
|
},
|
||||||
|
"LinerBot": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Assistants",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "LinerBot is the web crawler used by Liner AI assistant to gather information from academic sources and websites to provide accurate answers with line-by-line source citations for research and scholarly work. More info can be found at https://darkvisitors.com/agents/agents/linerbot"
|
||||||
|
},
|
||||||
|
"Linguee Bot": {
|
||||||
|
"operator": "[Linguee](https://www.linguee.com)",
|
||||||
|
"respect": "No",
|
||||||
|
"function": "AI powered translation service",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Linguee Bot is a web crawler used by Linguee to gather training data for its AI powered translation service."
|
||||||
|
},
|
||||||
|
"LinkupBot": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Search Crawlers",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/linkupbot"
|
||||||
|
},
|
||||||
|
"Manus-User": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Agents",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/manus-user"
|
||||||
|
},
|
||||||
"meta-externalagent": {
|
"meta-externalagent": {
|
||||||
"operator": "[Meta](https://developers.facebook.com/docs/sharing/webmasters/web-crawlers)",
|
"operator": "[Meta](https://developers.facebook.com/docs/sharing/webmasters/web-crawlers)",
|
||||||
"respect": "Yes",
|
"respect": "Yes",
|
||||||
|
|
@ -307,6 +573,20 @@
|
||||||
"frequency": "Unclear at this time.",
|
"frequency": "Unclear at this time.",
|
||||||
"description": "Meta-ExternalFetcher is dispatched by Meta AI products in response to user prompts, when they need to fetch an individual links. More info can be found at https://darkvisitors.com/agents/agents/meta-externalfetcher"
|
"description": "Meta-ExternalFetcher is dispatched by Meta AI products in response to user prompts, when they need to fetch an individual links. More info can be found at https://darkvisitors.com/agents/agents/meta-externalfetcher"
|
||||||
},
|
},
|
||||||
|
"meta-webindexer": {
|
||||||
|
"operator": "[Meta](https://developers.facebook.com/docs/sharing/webmasters/web-crawlers/)",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Assistants",
|
||||||
|
"frequency": "Unhinged, more than 1 per second.",
|
||||||
|
"description": "As per their documentation, \"The Meta-WebIndexer crawler navigates the web to improve Meta AI search result quality for users. In doing so, Meta analyzes online content to enhance the relevance and accuracy of Meta AI. Allowing Meta-WebIndexer in your robots.txt file helps us cite and link to your content in Meta AI's responses.\""
|
||||||
|
},
|
||||||
|
"MistralAI-User": {
|
||||||
|
"operator": "Mistral",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Assistants",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "MistralAI-User is an AI assistant operated by Mistral. It's not currently known to be artificially intelligent or AI-related. If you think that's incorrect or can provide more detail about its purpose, please contact us. More info can be found at https://darkvisitors.com/agents/agents/mistralai-user"
|
||||||
|
},
|
||||||
"MistralAI-User/1.0": {
|
"MistralAI-User/1.0": {
|
||||||
"operator": "Mistral AI",
|
"operator": "Mistral AI",
|
||||||
"function": "Takes action based on user prompts.",
|
"function": "Takes action based on user prompts.",
|
||||||
|
|
@ -314,6 +594,27 @@
|
||||||
"description": "MistralAI-User is for user actions in LeChat. When users ask LeChat a question, it may visit a web page to help answer and include a link to the source in its response.",
|
"description": "MistralAI-User is for user actions in LeChat. When users ask LeChat a question, it may visit a web page to help answer and include a link to the source in its response.",
|
||||||
"respect": "Yes"
|
"respect": "Yes"
|
||||||
},
|
},
|
||||||
|
"MyCentralAIScraperBot": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI data scraper",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Operator and data use is unclear at this time."
|
||||||
|
},
|
||||||
|
"netEstate Imprint Crawler": {
|
||||||
|
"operator": "netEstate",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Data Scrapers",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "netEstate Imprint Crawler is an AI data scraper operated by netEstate. If you think this is incorrect or can provide additional detail about its purpose, please contact us. More info can be found at https://darkvisitors.com/agents/agents/netestate-imprint-crawler"
|
||||||
|
},
|
||||||
|
"NotebookLM": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Assistants",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "NotebookLM is an AI-powered research and note-taking assistant that helps users synthesize information from their own uploaded sources, such as documents, transcripts, or web content. It can generate summaries, answer questions, and highlight key themes from the materials you provide, acting like a personalized research companion built on Google's Gemini model. NotebookLM fetches source URLs when users add them to their notebooks, enabling the AI to access and analyze those pages for context and insights. More info can be found at https://darkvisitors.com/agents/agents/google-notebooklm"
|
||||||
|
},
|
||||||
"NovaAct": {
|
"NovaAct": {
|
||||||
"operator": "Unclear at this time.",
|
"operator": "Unclear at this time.",
|
||||||
"respect": "Unclear at this time.",
|
"respect": "Unclear at this time.",
|
||||||
|
|
@ -342,6 +643,13 @@
|
||||||
"operator": "[Webz.io](https://webz.io/)",
|
"operator": "[Webz.io](https://webz.io/)",
|
||||||
"respect": "[Yes](https://web.archive.org/web/20170704003301/http://omgili.com/Crawler.html)"
|
"respect": "[Yes](https://web.archive.org/web/20170704003301/http://omgili.com/Crawler.html)"
|
||||||
},
|
},
|
||||||
|
"OpenAI": {
|
||||||
|
"operator": "[OpenAI](https://openai.com)",
|
||||||
|
"respect": "Yes",
|
||||||
|
"function": "Unclear at this time.",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "The purpose of this bot is unclear at this time but it is a member of OpenAI's suite of crawlers."
|
||||||
|
},
|
||||||
"Operator": {
|
"Operator": {
|
||||||
"operator": "Unclear at this time.",
|
"operator": "Unclear at this time.",
|
||||||
"respect": "Unclear at this time.",
|
"respect": "Unclear at this time.",
|
||||||
|
|
@ -385,25 +693,39 @@
|
||||||
"description": "Crawls sites to surface as results in Perplexity."
|
"description": "Crawls sites to surface as results in Perplexity."
|
||||||
},
|
},
|
||||||
"PetalBot": {
|
"PetalBot": {
|
||||||
"description": "Operated by Huawei to provide search and AI assistant services.",
|
|
||||||
"frequency": "No explicit frequency provided.",
|
|
||||||
"function": "Used to provide recommendations in Hauwei assistant and AI search services.",
|
|
||||||
"operator": "[Huawei](https://huawei.com/)",
|
"operator": "[Huawei](https://huawei.com/)",
|
||||||
"respect": "Yes"
|
"respect": "Yes",
|
||||||
|
"function": "Used to provide recommendations in Hauwei assistant and AI search services.",
|
||||||
|
"frequency": "No explicit frequency provided.",
|
||||||
|
"description": "Operated by Huawei to provide search and AI assistant services."
|
||||||
},
|
},
|
||||||
"PhindBot": {
|
"PhindBot": {
|
||||||
"description": "Company offers an AI agent that uses AI and generate extra web query on the fly",
|
|
||||||
"frequency": "No explicit frequency provided.",
|
|
||||||
"function": "AI-enhanced search engine.",
|
|
||||||
"operator": "[phind](https://www.phind.com/)",
|
"operator": "[phind](https://www.phind.com/)",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI-enhanced search engine.",
|
||||||
|
"frequency": "No explicit frequency provided.",
|
||||||
|
"description": "Company offers an AI agent that uses AI and generate extra web query on the fly"
|
||||||
|
},
|
||||||
|
"Poggio-Citations": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Assistants",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/poggio-citations"
|
||||||
|
},
|
||||||
|
"Poseidon Research Crawler": {
|
||||||
|
"operator": "[Poseidon Research](https://www.poseidonresearch.com)",
|
||||||
|
"description": "Lab focused on scaling the interpretability research necessary to make better AI systems possible.",
|
||||||
|
"frequency": "No explicit frequency provided.",
|
||||||
|
"function": "AI research crawler",
|
||||||
"respect": "Unclear at this time."
|
"respect": "Unclear at this time."
|
||||||
},
|
},
|
||||||
"QualifiedBot": {
|
"QualifiedBot": {
|
||||||
"description": "Operated by Qualified as part of their suite of AI product offerings.",
|
|
||||||
"frequency": "No explicit frequency provided.",
|
|
||||||
"function": "Company offers AI agents and other related products; usage can be assumed to support said products.",
|
|
||||||
"operator": "[Qualified](https://www.qualified.com)",
|
"operator": "[Qualified](https://www.qualified.com)",
|
||||||
"respect": "Unclear at this time."
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "Company offers AI agents and other related products; usage can be assumed to support said products.",
|
||||||
|
"frequency": "No explicit frequency provided.",
|
||||||
|
"description": "Operated by Qualified as part of their suite of AI product offerings."
|
||||||
},
|
},
|
||||||
"QuillBot": {
|
"QuillBot": {
|
||||||
"description": "Operated by QuillBot as part of their suite of AI product offerings.",
|
"description": "Operated by QuillBot as part of their suite of AI product offerings.",
|
||||||
|
|
@ -420,11 +742,11 @@
|
||||||
"respect": "Unclear at this time."
|
"respect": "Unclear at this time."
|
||||||
},
|
},
|
||||||
"SBIntuitionsBot": {
|
"SBIntuitionsBot": {
|
||||||
"description": "AI development and information analysis",
|
"operator": "[SB Intuitions](https://www.sbintuitions.co.jp/en/)",
|
||||||
"respect": "[Yes](https://www.sbintuitions.co.jp/en/bot/)",
|
"respect": "[Yes](https://www.sbintuitions.co.jp/en/bot/)",
|
||||||
"frequency": "No information.",
|
|
||||||
"function": "Uses data gathered in AI development and information analysis.",
|
"function": "Uses data gathered in AI development and information analysis.",
|
||||||
"operator": "[SB Intuitions](https://www.sbintuitions.co.jp/en/)"
|
"frequency": "No information.",
|
||||||
|
"description": "AI development and information analysis"
|
||||||
},
|
},
|
||||||
"Scrapy": {
|
"Scrapy": {
|
||||||
"description": "\"AI and machine learning applications often need large amounts of quality data, and web data extraction is a fast, efficient way to build structured data sets.\"",
|
"description": "\"AI and machine learning applications often need large amounts of quality data, and web data extraction is a fast, efficient way to build structured data sets.\"",
|
||||||
|
|
@ -438,14 +760,21 @@
|
||||||
"respect": "[Yes](https://www.semrush.com/bot/)",
|
"respect": "[Yes](https://www.semrush.com/bot/)",
|
||||||
"function": "Crawls your site for ContentShake AI tool.",
|
"function": "Crawls your site for ContentShake AI tool.",
|
||||||
"frequency": "Roughly once every 10 seconds.",
|
"frequency": "Roughly once every 10 seconds.",
|
||||||
"description": "You enter one text (on-demand) and we will make suggestions on it (the tool uses AI but we are not actively crawling the web, you need to manually enter one text/URL)."
|
"description": "Data collected is used for the ContentShake AI tool reports."
|
||||||
},
|
},
|
||||||
"SemrushBot-SWA": {
|
"SemrushBot-SWA": {
|
||||||
"operator": "[Semrush](https://www.semrush.com/)",
|
"operator": "[Semrush](https://www.semrush.com/)",
|
||||||
"respect": "[Yes](https://www.semrush.com/bot/)",
|
"respect": "[Yes](https://www.semrush.com/bot/)",
|
||||||
"function": "Checks URLs on your site for SWA tool.",
|
"function": "Checks URLs on your site for SEO Writing Assistant.",
|
||||||
"frequency": "Roughly once every 10 seconds.",
|
"frequency": "Roughly once every 10 seconds.",
|
||||||
"description": "You enter one text (on-demand) and we will make suggestions on it (the tool uses AI but we are not actively crawling the web, you need to manually enter one text/URL)."
|
"description": "Data collected is used for the SEO Writing Assistant tool to check if URL is accessible."
|
||||||
|
},
|
||||||
|
"ShapBot": {
|
||||||
|
"operator": "[Parallel](https://parallel.ai)",
|
||||||
|
"respect": "[Yes](https://docs.parallel.ai/features/crawler)",
|
||||||
|
"function": "Collects data for Parallel's web APIs.",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "ShapBot helps discover and index websites for Parallel's web APIs."
|
||||||
},
|
},
|
||||||
"Sidetrade indexer bot": {
|
"Sidetrade indexer bot": {
|
||||||
"description": "AI product training.",
|
"description": "AI product training.",
|
||||||
|
|
@ -454,6 +783,34 @@
|
||||||
"operator": "[Sidetrade](https://www.sidetrade.com)",
|
"operator": "[Sidetrade](https://www.sidetrade.com)",
|
||||||
"respect": "Unclear at this time."
|
"respect": "Unclear at this time."
|
||||||
},
|
},
|
||||||
|
"Spider": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Data Scrapers",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/spider"
|
||||||
|
},
|
||||||
|
"TavilyBot": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Assistants",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/tavilybot"
|
||||||
|
},
|
||||||
|
"TerraCotta": {
|
||||||
|
"operator": "[Ceramic AI](https://ceramic.ai/)",
|
||||||
|
"respect": "[Yes](https://github.com/CeramicTeam/CeramicTerracotta)",
|
||||||
|
"function": "AI Agents",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Downloads data to train LLMs."
|
||||||
|
},
|
||||||
|
"Thinkbot": {
|
||||||
|
"operator": "[Thinkbot](https://www.thinkbot.agency)",
|
||||||
|
"respect": "No",
|
||||||
|
"function": "Insights on AI integration and automation.",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Collects data for analysis on AI usage and automation."
|
||||||
|
},
|
||||||
"TikTokSpider": {
|
"TikTokSpider": {
|
||||||
"operator": "ByteDance",
|
"operator": "ByteDance",
|
||||||
"respect": "Unclear at this time.",
|
"respect": "Unclear at this time.",
|
||||||
|
|
@ -468,12 +825,26 @@
|
||||||
"frequency": "No information.",
|
"frequency": "No information.",
|
||||||
"description": "Makes data available for training AI models."
|
"description": "Makes data available for training AI models."
|
||||||
},
|
},
|
||||||
|
"TwinAgent": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Agents",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/twinagent"
|
||||||
|
},
|
||||||
"VelenPublicWebCrawler": {
|
"VelenPublicWebCrawler": {
|
||||||
"description": "\"Our goal with this crawler is to build business datasets and machine learning models to better understand the web.\"",
|
|
||||||
"frequency": "No information.",
|
|
||||||
"function": "Scrapes data for business data sets and machine learning models.",
|
|
||||||
"operator": "[Velen Crawler](https://velen.io)",
|
"operator": "[Velen Crawler](https://velen.io)",
|
||||||
"respect": "[Yes](https://velen.io)"
|
"respect": "[Yes](https://velen.io)",
|
||||||
|
"function": "Scrapes data for business data sets and machine learning models.",
|
||||||
|
"frequency": "No information.",
|
||||||
|
"description": "\"Our goal with this crawler is to build business datasets and machine learning models to better understand the web.\""
|
||||||
|
},
|
||||||
|
"WARDBot": {
|
||||||
|
"operator": "WEBSPARK",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Data Scrapers",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "WARDBot is an AI data scraper operated by WEBSPARK. It's not currently known to be artificially intelligent or AI-related. If you think that's incorrect or can provide more detail about its purpose, please contact us. More info can be found at https://darkvisitors.com/agents/agents/wardbot"
|
||||||
},
|
},
|
||||||
"Webzio-Extended": {
|
"Webzio-Extended": {
|
||||||
"operator": "Unclear at this time.",
|
"operator": "Unclear at this time.",
|
||||||
|
|
@ -482,6 +853,13 @@
|
||||||
"frequency": "Unclear at this time.",
|
"frequency": "Unclear at this time.",
|
||||||
"description": "Webzio-Extended is a web crawler used by Webz.io to maintain a repository of web crawl data that it sells to other companies, including those using it to train AI models. More info can be found at https://darkvisitors.com/agents/agents/webzio-extended"
|
"description": "Webzio-Extended is a web crawler used by Webz.io to maintain a repository of web crawl data that it sells to other companies, including those using it to train AI models. More info can be found at https://darkvisitors.com/agents/agents/webzio-extended"
|
||||||
},
|
},
|
||||||
|
"webzio-extended": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Data Scrapers",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/webzio-extended"
|
||||||
|
},
|
||||||
"wpbot": {
|
"wpbot": {
|
||||||
"operator": "[QuantumCloud](https://www.quantumcloud.com)",
|
"operator": "[QuantumCloud](https://www.quantumcloud.com)",
|
||||||
"respect": "Unclear at this time; opt out provided via [Google Form](https://forms.gle/ajBaxygz9jSR8p8G9)",
|
"respect": "Unclear at this time; opt out provided via [Google Form](https://forms.gle/ajBaxygz9jSR8p8G9)",
|
||||||
|
|
@ -489,6 +867,20 @@
|
||||||
"frequency": "Unclear at this time.",
|
"frequency": "Unclear at this time.",
|
||||||
"description": "wpbot is a used to support the functionality of the AI Chatbot for WordPress plugin. It supports the use of customer models, data collection and customer support."
|
"description": "wpbot is a used to support the functionality of the AI Chatbot for WordPress plugin. It supports the use of customer models, data collection and customer support."
|
||||||
},
|
},
|
||||||
|
"WRTNBot": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "Undocumented AI Agents",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/wrtnbot"
|
||||||
|
},
|
||||||
|
"YaK": {
|
||||||
|
"operator": "[Meltwater](https://www.meltwater.com/en/suite/consumer-intelligence)",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "According to the [Meltwater Consumer Intelligence page](https://www.meltwater.com/en/suite/consumer-intelligence) 'By applying AI, data science, and market research expertise to a live feed of global data sources, we transform unstructured data into actionable insights allowing better decision-making'.",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Retrieves data used for Meltwater's AI enabled consumer intelligence suite"
|
||||||
|
},
|
||||||
"YandexAdditional": {
|
"YandexAdditional": {
|
||||||
"operator": "[Yandex](https://yandex.ru)",
|
"operator": "[Yandex](https://yandex.ru)",
|
||||||
"respect": "[Yes](https://yandex.ru/support/webmaster/en/search-appearance/fast.html?lang=en)",
|
"respect": "[Yes](https://yandex.ru/support/webmaster/en/search-appearance/fast.html?lang=en)",
|
||||||
|
|
@ -509,5 +901,12 @@
|
||||||
"function": "Scrapes data for search engine and LLMs.",
|
"function": "Scrapes data for search engine and LLMs.",
|
||||||
"frequency": "No information.",
|
"frequency": "No information.",
|
||||||
"description": "Retrieves data used for You.com web search engine and LLMs."
|
"description": "Retrieves data used for You.com web search engine and LLMs."
|
||||||
|
},
|
||||||
|
"ZanistaBot": {
|
||||||
|
"operator": "Unclear at this time.",
|
||||||
|
"respect": "Unclear at this time.",
|
||||||
|
"function": "AI Search Crawlers",
|
||||||
|
"frequency": "Unclear at this time.",
|
||||||
|
"description": "Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/zanistabot"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
57
robots.txt
57
robots.txt
|
|
@ -1,52 +1,96 @@
|
||||||
|
User-agent: AddSearchBot
|
||||||
User-agent: AI2Bot
|
User-agent: AI2Bot
|
||||||
|
User-agent: AI2Bot-DeepResearchEval
|
||||||
User-agent: Ai2Bot-Dolma
|
User-agent: Ai2Bot-Dolma
|
||||||
User-agent: aiHitBot
|
User-agent: aiHitBot
|
||||||
|
User-agent: amazon-kendra
|
||||||
User-agent: Amazonbot
|
User-agent: Amazonbot
|
||||||
|
User-agent: AmazonBuyForMe
|
||||||
User-agent: Andibot
|
User-agent: Andibot
|
||||||
|
User-agent: Anomura
|
||||||
User-agent: anthropic-ai
|
User-agent: anthropic-ai
|
||||||
User-agent: Applebot
|
User-agent: Applebot
|
||||||
User-agent: Applebot-Extended
|
User-agent: Applebot-Extended
|
||||||
|
User-agent: atlassian-bot
|
||||||
|
User-agent: Awario
|
||||||
User-agent: bedrockbot
|
User-agent: bedrockbot
|
||||||
|
User-agent: bigsur.ai
|
||||||
|
User-agent: Bravebot
|
||||||
User-agent: Brightbot 1.0
|
User-agent: Brightbot 1.0
|
||||||
|
User-agent: BuddyBot
|
||||||
User-agent: Bytespider
|
User-agent: Bytespider
|
||||||
User-agent: CCBot
|
User-agent: CCBot
|
||||||
|
User-agent: Channel3Bot
|
||||||
|
User-agent: ChatGLM-Spider
|
||||||
|
User-agent: ChatGPT Agent
|
||||||
User-agent: ChatGPT-User
|
User-agent: ChatGPT-User
|
||||||
User-agent: Claude-SearchBot
|
User-agent: Claude-SearchBot
|
||||||
User-agent: Claude-User
|
User-agent: Claude-User
|
||||||
User-agent: Claude-Web
|
User-agent: Claude-Web
|
||||||
User-agent: ClaudeBot
|
User-agent: ClaudeBot
|
||||||
|
User-agent: Cloudflare-AutoRAG
|
||||||
|
User-agent: CloudVertexBot
|
||||||
User-agent: cohere-ai
|
User-agent: cohere-ai
|
||||||
User-agent: cohere-training-data-crawler
|
User-agent: cohere-training-data-crawler
|
||||||
User-agent: Cotoyogi
|
User-agent: Cotoyogi
|
||||||
|
User-agent: Crawl4AI
|
||||||
User-agent: Crawlspace
|
User-agent: Crawlspace
|
||||||
|
User-agent: Datenbank Crawler
|
||||||
|
User-agent: DeepSeekBot
|
||||||
|
User-agent: Devin
|
||||||
User-agent: Diffbot
|
User-agent: Diffbot
|
||||||
User-agent: DuckAssistBot
|
User-agent: DuckAssistBot
|
||||||
|
User-agent: Echobot Bot
|
||||||
User-agent: EchoboxBot
|
User-agent: EchoboxBot
|
||||||
User-agent: FacebookBot
|
User-agent: FacebookBot
|
||||||
|
User-agent: facebookexternalhit
|
||||||
User-agent: Factset_spyderbot
|
User-agent: Factset_spyderbot
|
||||||
User-agent: FirecrawlAgent
|
User-agent: FirecrawlAgent
|
||||||
User-agent: FriendlyCrawler
|
User-agent: FriendlyCrawler
|
||||||
|
User-agent: Gemini-Deep-Research
|
||||||
User-agent: Google-CloudVertexBot
|
User-agent: Google-CloudVertexBot
|
||||||
User-agent: Google-Extended
|
User-agent: Google-Extended
|
||||||
|
User-agent: Google-Firebase
|
||||||
|
User-agent: Google-NotebookLM
|
||||||
|
User-agent: GoogleAgent-Mariner
|
||||||
User-agent: GoogleOther
|
User-agent: GoogleOther
|
||||||
User-agent: GoogleOther-Image
|
User-agent: GoogleOther-Image
|
||||||
User-agent: GoogleOther-Video
|
User-agent: GoogleOther-Video
|
||||||
User-agent: GPTBot
|
User-agent: GPTBot
|
||||||
|
User-agent: iAskBot
|
||||||
|
User-agent: iaskspider
|
||||||
User-agent: iaskspider/2.0
|
User-agent: iaskspider/2.0
|
||||||
|
User-agent: IbouBot
|
||||||
User-agent: ICC-Crawler
|
User-agent: ICC-Crawler
|
||||||
User-agent: ImagesiftBot
|
User-agent: ImagesiftBot
|
||||||
|
User-agent: imageSpider
|
||||||
User-agent: img2dataset
|
User-agent: img2dataset
|
||||||
User-agent: ISSCyberRiskCrawler
|
User-agent: ISSCyberRiskCrawler
|
||||||
User-agent: Kangaroo Bot
|
User-agent: Kangaroo Bot
|
||||||
|
User-agent: KlaviyoAIBot
|
||||||
|
User-agent: KunatoCrawler
|
||||||
|
User-agent: laion-huggingface-processor
|
||||||
|
User-agent: LAIONDownloader
|
||||||
|
User-agent: LCC
|
||||||
|
User-agent: LinerBot
|
||||||
|
User-agent: Linguee Bot
|
||||||
|
User-agent: LinkupBot
|
||||||
|
User-agent: Manus-User
|
||||||
User-agent: meta-externalagent
|
User-agent: meta-externalagent
|
||||||
User-agent: Meta-ExternalAgent
|
User-agent: Meta-ExternalAgent
|
||||||
User-agent: meta-externalfetcher
|
User-agent: meta-externalfetcher
|
||||||
User-agent: Meta-ExternalFetcher
|
User-agent: Meta-ExternalFetcher
|
||||||
|
User-agent: meta-webindexer
|
||||||
|
User-agent: MistralAI-User
|
||||||
User-agent: MistralAI-User/1.0
|
User-agent: MistralAI-User/1.0
|
||||||
|
User-agent: MyCentralAIScraperBot
|
||||||
|
User-agent: netEstate Imprint Crawler
|
||||||
|
User-agent: NotebookLM
|
||||||
User-agent: NovaAct
|
User-agent: NovaAct
|
||||||
User-agent: OAI-SearchBot
|
User-agent: OAI-SearchBot
|
||||||
User-agent: omgili
|
User-agent: omgili
|
||||||
User-agent: omgilibot
|
User-agent: omgilibot
|
||||||
|
User-agent: OpenAI
|
||||||
User-agent: Operator
|
User-agent: Operator
|
||||||
User-agent: PanguBot
|
User-agent: PanguBot
|
||||||
User-agent: Panscient
|
User-agent: Panscient
|
||||||
|
|
@ -55,6 +99,8 @@ User-agent: Perplexity-User
|
||||||
User-agent: PerplexityBot
|
User-agent: PerplexityBot
|
||||||
User-agent: PetalBot
|
User-agent: PetalBot
|
||||||
User-agent: PhindBot
|
User-agent: PhindBot
|
||||||
|
User-agent: Poggio-Citations
|
||||||
|
User-agent: Poseidon Research Crawler
|
||||||
User-agent: QualifiedBot
|
User-agent: QualifiedBot
|
||||||
User-agent: QuillBot
|
User-agent: QuillBot
|
||||||
User-agent: quillbot.com
|
User-agent: quillbot.com
|
||||||
|
|
@ -62,13 +108,24 @@ User-agent: SBIntuitionsBot
|
||||||
User-agent: Scrapy
|
User-agent: Scrapy
|
||||||
User-agent: SemrushBot-OCOB
|
User-agent: SemrushBot-OCOB
|
||||||
User-agent: SemrushBot-SWA
|
User-agent: SemrushBot-SWA
|
||||||
|
User-agent: ShapBot
|
||||||
User-agent: Sidetrade indexer bot
|
User-agent: Sidetrade indexer bot
|
||||||
|
User-agent: Spider
|
||||||
|
User-agent: TavilyBot
|
||||||
|
User-agent: TerraCotta
|
||||||
|
User-agent: Thinkbot
|
||||||
User-agent: TikTokSpider
|
User-agent: TikTokSpider
|
||||||
User-agent: Timpibot
|
User-agent: Timpibot
|
||||||
|
User-agent: TwinAgent
|
||||||
User-agent: VelenPublicWebCrawler
|
User-agent: VelenPublicWebCrawler
|
||||||
|
User-agent: WARDBot
|
||||||
User-agent: Webzio-Extended
|
User-agent: Webzio-Extended
|
||||||
|
User-agent: webzio-extended
|
||||||
User-agent: wpbot
|
User-agent: wpbot
|
||||||
|
User-agent: WRTNBot
|
||||||
|
User-agent: YaK
|
||||||
User-agent: YandexAdditional
|
User-agent: YandexAdditional
|
||||||
User-agent: YandexAdditionalBot
|
User-agent: YandexAdditionalBot
|
||||||
User-agent: YouBot
|
User-agent: YouBot
|
||||||
|
User-agent: ZanistaBot
|
||||||
Disallow: /
|
Disallow: /
|
||||||
|
|
|
||||||
|
|
@ -1,54 +1,98 @@
|
||||||
| Name | Operator | Respects `robots.txt` | Data use | Visit regularity | Description |
|
| Name | Operator | Respects `robots.txt` | Data use | Visit regularity | Description |
|
||||||
|------|----------|-----------------------|----------|------------------|-------------|
|
|------|----------|-----------------------|----------|------------------|-------------|
|
||||||
|
| AddSearchBot | Unclear at this time. | Unclear at this time. | AI Search Crawlers | Unclear at this time. | AddSearchBot is a web crawler that indexes website content for AddSearch's AI-powered site search solution, collecting data to provide fast and accurate search results. More info can be found at https://darkvisitors.com/agents/agents/addsearchbot |
|
||||||
| AI2Bot | [Ai2](https://allenai.org/crawler) | Yes | Content is used to train open language models. | No information provided. | Explores 'certain domains' to find web content. |
|
| AI2Bot | [Ai2](https://allenai.org/crawler) | Yes | Content is used to train open language models. | No information provided. | Explores 'certain domains' to find web content. |
|
||||||
|
| AI2Bot\-DeepResearchEval | Unclear at this time. | Unclear at this time. | AI Assistants | Unclear at this time. | Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/ai2bot-deepresearcheval |
|
||||||
| Ai2Bot\-Dolma | [Ai2](https://allenai.org/crawler) | Yes | Content is used to train open language models. | No information provided. | Explores 'certain domains' to find web content. |
|
| Ai2Bot\-Dolma | [Ai2](https://allenai.org/crawler) | Yes | Content is used to train open language models. | No information provided. | Explores 'certain domains' to find web content. |
|
||||||
| aiHitBot | [aiHit](https://www.aihitdata.com/about) | Yes | A massive, artificial intelligence/machine learning, automated system. | No information provided. | Scrapes data for AI systems. |
|
| aiHitBot | [aiHit](https://www.aihitdata.com/about) | Yes | A massive, artificial intelligence/machine learning, automated system. | No information provided. | Scrapes data for AI systems. |
|
||||||
|
| amazon\-kendra | Amazon | Yes | Collects data for AI natural language search | No information provided. | Amazon Kendra is a highly accurate intelligent search service that enables your users to search unstructured data using natural language. It returns specific answers to questions, giving users an experience that's close to interacting with a human expert. It is highly scalable and capable of meeting performance demands, tightly integrated with other AWS services such as Amazon S3 and Amazon Lex, and offers enterprise-grade security. |
|
||||||
| Amazonbot | Amazon | Yes | Service improvement and enabling answers for Alexa users. | No information provided. | Includes references to crawled website when surfacing answers via Alexa; does not clearly outline other uses. |
|
| Amazonbot | Amazon | Yes | Service improvement and enabling answers for Alexa users. | No information provided. | Includes references to crawled website when surfacing answers via Alexa; does not clearly outline other uses. |
|
||||||
|
| AmazonBuyForMe | [Amazon](https://amazon.com) | Unclear at this time. | AI Agents | No information provided. | Buy For Me is an AI agent that helps buy products at the direction of customers. |
|
||||||
| Andibot | [Andi](https://andisearch.com/) | Unclear at this time | Search engine using generative AI, AI Search Assistant | No information provided. | Scrapes website and provides AI summary. |
|
| Andibot | [Andi](https://andisearch.com/) | Unclear at this time | Search engine using generative AI, AI Search Assistant | No information provided. | Scrapes website and provides AI summary. |
|
||||||
|
| Anomura | [Direqt](https://direqt.ai) | Yes | Collects data for AI search | No information provided. | Anomura is Direqt's search crawler, it discovers and indexes pages their customers websites. |
|
||||||
| anthropic\-ai | [Anthropic](https://www.anthropic.com) | Unclear at this time. | Scrapes data to train Anthropic's AI products. | No information provided. | Scrapes data to train LLMs and AI products offered by Anthropic. |
|
| anthropic\-ai | [Anthropic](https://www.anthropic.com) | Unclear at this time. | Scrapes data to train Anthropic's AI products. | No information provided. | Scrapes data to train LLMs and AI products offered by Anthropic. |
|
||||||
| Applebot | Unclear at this time. | Unclear at this time. | AI Search Crawlers | Unclear at this time. | Applebot is a web crawler used by Apple to index search results that allow the Siri AI Assistant to answer user questions. Siri's answers normally contain references to the website. More info can be found at https://darkvisitors.com/agents/agents/applebot |
|
| Applebot | Unclear at this time. | Unclear at this time. | AI Search Crawlers | Unclear at this time. | Applebot is a web crawler used by Apple to index search results that allow the Siri AI Assistant to answer user questions. Siri's answers normally contain references to the website. More info can be found at https://darkvisitors.com/agents/agents/applebot |
|
||||||
| Applebot\-Extended | [Apple](https://support.apple.com/en-us/119829#datausage) | Yes | Powers features in Siri, Spotlight, Safari, Apple Intelligence, and others. | Unclear at this time. | Apple has a secondary user agent, Applebot-Extended ... [that is] used to train Apple's foundation models powering generative AI features across Apple products, including Apple Intelligence, Services, and Developer Tools. |
|
| Applebot\-Extended | [Apple](https://support.apple.com/en-us/119829#datausage) | Yes | Powers features in Siri, Spotlight, Safari, Apple Intelligence, and others. | Unclear at this time. | Apple has a secondary user agent, Applebot-Extended ... [that is] used to train Apple's foundation models powering generative AI features across Apple products, including Apple Intelligence, Services, and Developer Tools. |
|
||||||
|
| atlassian\-bot | [Atlassian](https://www.atlassian.com) | [Yes](https://support.atlassian.com/organization-administration/docs/connect-custom-website-to-rovo/#Editing-your-robots.txt) | AI search, assistants and agents | No information provided. | atlassian-bot is a web crawler used to index website content for its AI search, assistants and agents available in its Rovo GenAI product. |
|
||||||
|
| Awario | Awario | Unclear at this time. | AI Data Scrapers | Unclear at this time. | Awario is an AI data scraper operated by Awario. It's not currently known to be artificially intelligent or AI-related. If you think that's incorrect or can provide more detail about its purpose, please contact us. More info can be found at https://darkvisitors.com/agents/agents/awario |
|
||||||
| bedrockbot | [Amazon](https://amazon.com) | [Yes](https://docs.aws.amazon.com/bedrock/latest/userguide/webcrawl-data-source-connector.html#configuration-webcrawl-connector) | Data scraping for custom AI applications. | Unclear at this time. | Connects to and crawls URLs that have been selected for use in a user's AWS bedrock application. |
|
| bedrockbot | [Amazon](https://amazon.com) | [Yes](https://docs.aws.amazon.com/bedrock/latest/userguide/webcrawl-data-source-connector.html#configuration-webcrawl-connector) | Data scraping for custom AI applications. | Unclear at this time. | Connects to and crawls URLs that have been selected for use in a user's AWS bedrock application. |
|
||||||
| Brightbot 1\.0 | Browsing.ai | Unclear at this time. | LLM/AI training. | Unclear at this time. | Scrapes data to train LLMs and AI products focused on website customer support. |
|
| bigsur\.ai | Big Sur AI that fetches website content to enable AI-powered web agents, sales assistants, and content marketing solutions for businesses | Unclear at this time. | AI Assistants | Unclear at this time. | bigsur.ai is a web crawler operated by Big Sur AI that fetches website content to enable AI-powered web agents, sales assistants, and content marketing solutions for businesses. More info can be found at https://darkvisitors.com/agents/agents/bigsur-ai |
|
||||||
|
| Bravebot | https://safe.search.brave.com/help/brave-search-crawler | Yes | Collects data for AI search | Unclear at this time. | Brave search has a crawler to discover new pages and index their content. |
|
||||||
|
| Brightbot 1\.0 | https://brightdata.com/brightbot | Unclear at this time. | LLM/AI training. | At least one per minute. | Scrapes data to train LLMs and AI products focused on website customer support, [uses residential IPs and legit-looking user-agents to disguise itself](https://ksol.io/en/blog/posts/brightbot-not-that-bright/). |
|
||||||
|
| BuddyBot | [BuddyBotLearning](https://www.buddybotlearning.com) | Unclear at this time. | AI Learning Companion | Unclear at this time. | BuddyBot is a voice-controlled AI learning companion targeted at childhood STEM education. |
|
||||||
| Bytespider | ByteDance | No | LLM training. | Unclear at this time. | Downloads data to train LLMs, including ChatGPT competitors. |
|
| Bytespider | ByteDance | No | LLM training. | Unclear at this time. | Downloads data to train LLMs, including ChatGPT competitors. |
|
||||||
| CCBot | [Common Crawl Foundation](https://commoncrawl.org) | [Yes](https://commoncrawl.org/ccbot) | Provides open crawl dataset, used for many purposes, including Machine Learning/AI. | Monthly at present. | Web archive going back to 2008. [Cited in thousands of research papers per year](https://commoncrawl.org/research-papers). |
|
| CCBot | [Common Crawl Foundation](https://commoncrawl.org) | [Yes](https://commoncrawl.org/ccbot) | Provides open crawl dataset, used for many purposes, including Machine Learning/AI. | Monthly at present. | Web archive going back to 2008. [Cited in thousands of research papers per year](https://commoncrawl.org/research-papers). |
|
||||||
|
| Channel3Bot | Unclear at this time. | Unclear at this time. | AI Search Crawlers | Unclear at this time. | Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/channel3bot |
|
||||||
|
| ChatGLM\-Spider | Unclear at this time. | Unclear at this time. | AI Data Scrapers | Unclear at this time. | Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/chatglm-spider |
|
||||||
|
| ChatGPT Agent | [OpenAI](https://openai.com) | Yes | AI Agents | Unclear at this time. | ChatGPT Agent is an AI agent created by OpenAI that can use a web browser. It can intelligently navigate and interact with websites to complete multi-step tasks on behalf of a human user. More info can be found at https://darkvisitors.com/agents/agents/chatgpt-agent |
|
||||||
| ChatGPT\-User | [OpenAI](https://openai.com) | Yes | Takes action based on user prompts. | Only when prompted by a user. | Used by plugins in ChatGPT to answer queries based on user input. |
|
| ChatGPT\-User | [OpenAI](https://openai.com) | Yes | Takes action based on user prompts. | Only when prompted by a user. | Used by plugins in ChatGPT to answer queries based on user input. |
|
||||||
| Claude\-SearchBot | [Anthropic](https://www.anthropic.com) | [Yes](https://support.anthropic.com/en/articles/8896518-does-anthropic-crawl-data-from-the-web-and-how-can-site-owners-block-the-crawler) | Claude-SearchBot navigates the web to improve search result quality for users. It analyzes online content specifically to enhance the relevance and accuracy of search responses. | No information provided. | Claude-SearchBot navigates the web to improve search result quality for users. It analyzes online content specifically to enhance the relevance and accuracy of search responses. |
|
| Claude\-SearchBot | [Anthropic](https://www.anthropic.com) | [Yes](https://support.anthropic.com/en/articles/8896518-does-anthropic-crawl-data-from-the-web-and-how-can-site-owners-block-the-crawler) | Claude-SearchBot navigates the web to improve search result quality for users. It analyzes online content specifically to enhance the relevance and accuracy of search responses. | No information provided. | Claude-SearchBot navigates the web to improve search result quality for users. It analyzes online content specifically to enhance the relevance and accuracy of search responses. |
|
||||||
| Claude\-User | [Anthropic](https://www.anthropic.com) | [Yes](https://support.anthropic.com/en/articles/8896518-does-anthropic-crawl-data-from-the-web-and-how-can-site-owners-block-the-crawler) | Claude-User supports Claude AI users. When individuals ask questions to Claude, it may access websites using a Claude-User agent. | No information provided. | Claude-User supports Claude AI users. When individuals ask questions to Claude, it may access websites using a Claude-User agent. |
|
| Claude\-User | [Anthropic](https://www.anthropic.com) | [Yes](https://support.anthropic.com/en/articles/8896518-does-anthropic-crawl-data-from-the-web-and-how-can-site-owners-block-the-crawler) | Claude-User supports Claude AI users. When individuals ask questions to Claude, it may access websites using a Claude-User agent. | No information provided. | Claude-User supports Claude AI users. When individuals ask questions to Claude, it may access websites using a Claude-User agent. |
|
||||||
| Claude\-Web | Anthropic | Unclear at this time. | Undocumented AI Agents | Unclear at this time. | Claude-Web is an AI-related agent operated by Anthropic. It's currently unclear exactly what it's used for, since there's no official documentation. If you can provide more detail, please contact us. More info can be found at https://darkvisitors.com/agents/agents/claude-web |
|
| Claude\-Web | Anthropic | Unclear at this time. | Undocumented AI Agents | Unclear at this time. | Claude-Web is an AI-related agent operated by Anthropic. It's currently unclear exactly what it's used for, since there's no official documentation. If you can provide more detail, please contact us. More info can be found at https://darkvisitors.com/agents/agents/claude-web |
|
||||||
| ClaudeBot | [Anthropic](https://www.anthropic.com) | [Yes](https://support.anthropic.com/en/articles/8896518-does-anthropic-crawl-data-from-the-web-and-how-can-site-owners-block-the-crawler) | Scrapes data to train Anthropic's AI products. | No information provided. | Scrapes data to train LLMs and AI products offered by Anthropic. |
|
| ClaudeBot | [Anthropic](https://www.anthropic.com) | [Yes](https://support.anthropic.com/en/articles/8896518-does-anthropic-crawl-data-from-the-web-and-how-can-site-owners-block-the-crawler) | Scrapes data to train Anthropic's AI products. | No information provided. | Scrapes data to train LLMs and AI products offered by Anthropic. |
|
||||||
|
| Cloudflare\-AutoRAG | [Cloudflare](https://developers.cloudflare.com/autorag) | Yes | Collects data for AI search | Unclear at this time. | AutoRAG is an all-in-one AI search solution. |
|
||||||
|
| CloudVertexBot | Unclear at this time. | Unclear at this time. | AI Data Scrapers | Unclear at this time. | CloudVertexBot is a Google-operated crawler available to site owners to request targeted crawls of their own sites for AI training purposes on the Vertex AI platform. More info can be found at https://darkvisitors.com/agents/agents/cloudvertexbot |
|
||||||
| cohere\-ai | [Cohere](https://cohere.com) | Unclear at this time. | Retrieves data to provide responses to user-initiated prompts. | Takes action based on user prompts. | Retrieves data based on user prompts. |
|
| cohere\-ai | [Cohere](https://cohere.com) | Unclear at this time. | Retrieves data to provide responses to user-initiated prompts. | Takes action based on user prompts. | Retrieves data based on user prompts. |
|
||||||
| cohere\-training\-data\-crawler | Cohere to download training data for its LLMs (Large Language Models) that power its enterprise AI products | Unclear at this time. | AI Data Scrapers | Unclear at this time. | cohere-training-data-crawler is a web crawler operated by Cohere to download training data for its LLMs (Large Language Models) that power its enterprise AI products. More info can be found at https://darkvisitors.com/agents/agents/cohere-training-data-crawler |
|
| cohere\-training\-data\-crawler | Cohere to download training data for its LLMs (Large Language Models) that power its enterprise AI products | Unclear at this time. | AI Data Scrapers | Unclear at this time. | cohere-training-data-crawler is a web crawler operated by Cohere to download training data for its LLMs (Large Language Models) that power its enterprise AI products. More info can be found at https://darkvisitors.com/agents/agents/cohere-training-data-crawler |
|
||||||
| Cotoyogi | [ROIS](https://ds.rois.ac.jp/en_center8/en_crawler/) | Yes | AI LLM Scraper. | No information provided. | Scrapes data for AI training in Japanese language. |
|
| Cotoyogi | [ROIS](https://ds.rois.ac.jp/en_center8/en_crawler/) | Yes | AI LLM Scraper. | No information provided. | Scrapes data for AI training in Japanese language. |
|
||||||
|
| Crawl4AI | Unclear at this time. | Unclear at this time. | Undocumented AI Agents | Unclear at this time. | Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/crawl4ai |
|
||||||
| Crawlspace | [Crawlspace](https://crawlspace.dev) | [Yes](https://news.ycombinator.com/item?id=42756654) | Scrapes data | Unclear at this time. | Provides crawling services for any purpose, probably including AI model training. |
|
| Crawlspace | [Crawlspace](https://crawlspace.dev) | [Yes](https://news.ycombinator.com/item?id=42756654) | Scrapes data | Unclear at this time. | Provides crawling services for any purpose, probably including AI model training. |
|
||||||
|
| Datenbank Crawler | Datenbank | Unclear at this time. | AI Data Scrapers | Unclear at this time. | Datenbank Crawler is an AI data scraper operated by Datenbank. It's not currently known to be artificially intelligent or AI-related. If you think that's incorrect or can provide more detail about its purpose, please contact us. More info can be found at https://darkvisitors.com/agents/agents/datenbank-crawler |
|
||||||
|
| DeepSeekBot | DeepSeek | No | Training language models and improving AI products | Unclear at this time. | DeepSeekBot is a web crawler used by DeepSeek to train its language models and improve its AI products. |
|
||||||
|
| Devin | Devin AI | Yes | AI Assistants | Unclear at this time. | Devin is a collaborative AI teammate built to help ambitious engineering teams achieve more. |
|
||||||
| Diffbot | [Diffbot](https://www.diffbot.com/) | At the discretion of Diffbot users. | Aggregates structured web data for monitoring and AI model training. | Unclear at this time. | Diffbot is an application used to parse web pages into structured data; this data is used for monitoring or AI model training. |
|
| Diffbot | [Diffbot](https://www.diffbot.com/) | At the discretion of Diffbot users. | Aggregates structured web data for monitoring and AI model training. | Unclear at this time. | Diffbot is an application used to parse web pages into structured data; this data is used for monitoring or AI model training. |
|
||||||
| DuckAssistBot | Unclear at this time. | Unclear at this time. | AI Assistants | Unclear at this time. | DuckAssistBot is used by DuckDuckGo's DuckAssist feature to fetch content and generate realtime AI answers to user searches. More info can be found at https://darkvisitors.com/agents/agents/duckassistbot |
|
| DuckAssistBot | Unclear at this time. | Unclear at this time. | AI Assistants | Unclear at this time. | DuckAssistBot is used by DuckDuckGo's DuckAssist feature to fetch content and generate realtime AI answers to user searches. More info can be found at https://darkvisitors.com/agents/agents/duckassistbot |
|
||||||
|
| Echobot Bot | Echobox | Unclear at this time. | AI Data Scrapers | Unclear at this time. | Echobot Bot is an AI data scraper operated by Echobox. It's not currently known to be artificially intelligent or AI-related. If you think that's incorrect or can provide more detail about its purpose, please contact us. More info can be found at https://darkvisitors.com/agents/agents/echobot-bot |
|
||||||
| EchoboxBot | [Echobox](https://echobox.com) | Unclear at this time. | Data collection to support AI-powered products. | Unclear at this time. | Supports company's AI-powered social and email management products. |
|
| EchoboxBot | [Echobox](https://echobox.com) | Unclear at this time. | Data collection to support AI-powered products. | Unclear at this time. | Supports company's AI-powered social and email management products. |
|
||||||
| FacebookBot | Meta/Facebook | [Yes](https://developers.facebook.com/docs/sharing/bot/) | Training language models | Up to 1 page per second | Officially used for training Meta "speech recognition technology," unknown if used to train Meta AI specifically. |
|
| FacebookBot | Meta/Facebook | [Yes](https://developers.facebook.com/docs/sharing/bot/) | Training language models | Up to 1 page per second | Officially used for training Meta "speech recognition technology," unknown if used to train Meta AI specifically. |
|
||||||
|
| facebookexternalhit | Meta/Facebook | [No](https://github.com/ai-robots-txt/ai.robots.txt/issues/40#issuecomment-2524591313) | Ostensibly only for sharing, but likely used as an AI crawler as well | Unclear at this time. | Note that excluding FacebookExternalHit will block incorporating OpenGraph data when sharing in social media, including rich links in Apple's Messages app. [According to Meta](https://developers.facebook.com/docs/sharing/webmasters/web-crawlers/), its purpose is "to crawl the content of an app or website that was shared on one of Meta’s family of apps…". However, see discussions [here](https://github.com/ai-robots-txt/ai.robots.txt/pull/21) and [here](https://github.com/ai-robots-txt/ai.robots.txt/issues/40#issuecomment-2524591313) for evidence to the contrary. |
|
||||||
| Factset\_spyderbot | [Factset](https://www.factset.com/ai) | Unclear at this time. | AI model training. | No information provided. | Scrapes data for AI training. |
|
| Factset\_spyderbot | [Factset](https://www.factset.com/ai) | Unclear at this time. | AI model training. | No information provided. | Scrapes data for AI training. |
|
||||||
| FirecrawlAgent | [Firecrawl](https://www.firecrawl.dev/) | Yes | AI scraper and LLM training | No information provided. | Scrapes data for AI systems and LLM training. |
|
| FirecrawlAgent | [Firecrawl](https://www.firecrawl.dev/) | Yes | AI scraper and LLM training | No information provided. | Scrapes data for AI systems and LLM training. |
|
||||||
| FriendlyCrawler | Unknown | [Yes](https://imho.alex-kunz.com/2024/01/25/an-update-on-friendly-crawler) | We are using the data from the crawler to build datasets for machine learning experiments. | Unclear at this time. | Unclear who the operator is; but data is used for training/machine learning. |
|
| FriendlyCrawler | Unknown | [Yes](https://imho.alex-kunz.com/2024/01/25/an-update-on-friendly-crawler) | We are using the data from the crawler to build datasets for machine learning experiments. | Unclear at this time. | Unclear who the operator is; but data is used for training/machine learning. |
|
||||||
|
| Gemini\-Deep\-Research | Unclear at this time. | Unclear at this time. | AI Assistants | Unclear at this time. | Gemini-Deep-Research is the agent responsible for collecting and scanning resources used in Google Gemini's Deep Research feature, which acts as a personal research assistant. More info can be found at https://darkvisitors.com/agents/agents/gemini-deep-research |
|
||||||
| Google\-CloudVertexBot | Google | [Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers) | Build and manage AI models for businesses employing Vertex AI | No information. | Google-CloudVertexBot crawls sites on the site owners' request when building Vertex AI Agents. |
|
| Google\-CloudVertexBot | Google | [Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers) | Build and manage AI models for businesses employing Vertex AI | No information. | Google-CloudVertexBot crawls sites on the site owners' request when building Vertex AI Agents. |
|
||||||
| Google\-Extended | Google | [Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers) | LLM training. | No information. | Used to train Gemini and Vertex AI generative APIs. Does not impact a site's inclusion or ranking in Google Search. |
|
| Google\-Extended | Google | [Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers) | LLM training. | No information. | Used to train Gemini and Vertex AI generative APIs. Does not impact a site's inclusion or ranking in Google Search. |
|
||||||
|
| Google\-Firebase | Google | Unclear at this time. | Used as part of AI apps developed by users of Google's Firebase AI products. | Unclear at this time. | Supports Google's Firebase AI products. |
|
||||||
|
| Google\-NotebookLM | Unclear at this time. | Unclear at this time. | AI Assistants | Unclear at this time. | Google-NotebookLM is an AI-powered research and note-taking assistant that helps users synthesize information from their own uploaded sources, such as documents, transcripts, or web content. It can generate summaries, answer questions, and highlight key themes from the materials you provide, acting like a personalized research companion built on Google's Gemini model. Google-NotebookLM fetches source URLs when users add them to their notebooks, enabling the AI to access and analyze those pages for context and insights. More info can be found at https://darkvisitors.com/agents/agents/google-notebooklm |
|
||||||
|
| GoogleAgent\-Mariner | Google | Unclear at this time. | AI Agents | Unclear at this time. | GoogleAgent-Mariner is an AI agent created by Google that can use a web browser. It can intelligently navigate and interact with websites to complete multi-step tasks on behalf of a human user. More info can be found at https://darkvisitors.com/agents/agents/googleagent-mariner |
|
||||||
| GoogleOther | Google | [Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers) | Scrapes data. | No information. | "Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development." |
|
| GoogleOther | Google | [Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers) | Scrapes data. | No information. | "Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development." |
|
||||||
| GoogleOther\-Image | Google | [Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers) | Scrapes data. | No information. | "Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development." |
|
| GoogleOther\-Image | Google | [Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers) | Scrapes data. | No information. | "Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development." |
|
||||||
| GoogleOther\-Video | Google | [Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers) | Scrapes data. | No information. | "Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development." |
|
| GoogleOther\-Video | Google | [Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers) | Scrapes data. | No information. | "Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development." |
|
||||||
| GPTBot | [OpenAI](https://openai.com) | Yes | Scrapes data to train OpenAI's products. | No information. | Data is used to train current and future models, removed paywalled data, PII and data that violates the company's policies. |
|
| GPTBot | [OpenAI](https://openai.com) | Yes | Scrapes data to train OpenAI's products. | No information. | Data is used to train current and future models, removed paywalled data, PII and data that violates the company's policies. |
|
||||||
|
| iAskBot | Unclear at this time. | Unclear at this time. | Undocumented AI Agents | Unclear at this time. | Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/iaskbot |
|
||||||
|
| iaskspider | Unclear at this time. | Unclear at this time. | Undocumented AI Agents | Unclear at this time. | Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/iaskspider |
|
||||||
| iaskspider/2\.0 | iAsk | No | Crawls sites to provide answers to user queries. | Unclear at this time. | Used to provide answers to user queries. |
|
| iaskspider/2\.0 | iAsk | No | Crawls sites to provide answers to user queries. | Unclear at this time. | Used to provide answers to user queries. |
|
||||||
|
| IbouBot | Ibou | Yes | Search result generation. | Unclear at this time. | Ibou.io operates a crawler service named IbouBot which fuels and updates their graph representation of the World Wide Web. This database and all the metrics are used to provide a search engine. |
|
||||||
| ICC\-Crawler | [NICT](https://nict.go.jp) | Yes | Scrapes data to train and support AI technologies. | No information. | Use the collected data for artificial intelligence technologies; provide data to third parties, including commercial companies; those companies can use the data for their own business. |
|
| ICC\-Crawler | [NICT](https://nict.go.jp) | Yes | Scrapes data to train and support AI technologies. | No information. | Use the collected data for artificial intelligence technologies; provide data to third parties, including commercial companies; those companies can use the data for their own business. |
|
||||||
| ImagesiftBot | [ImageSift](https://imagesift.com) | [Yes](https://imagesift.com/about) | ImageSiftBot is a web crawler that scrapes the internet for publicly available images to support our suite of web intelligence products | No information. | Once images and text are downloaded from a webpage, ImageSift analyzes this data from the page and stores the information in an index. Our web intelligence products use this index to enable search and retrieval of similar images. |
|
| ImagesiftBot | [ImageSift](https://imagesift.com) | [Yes](https://imagesift.com/about) | ImageSiftBot is a web crawler that scrapes the internet for publicly available images to support their suite of web intelligence products | No information. | Once images and text are downloaded from a webpage, ImageSift analyzes this data from the page and stores the information in an index. Their web intelligence products use this index to enable search and retrieval of similar images. |
|
||||||
|
| imageSpider | Unclear at this time. | Unclear at this time. | AI Data Scrapers | Unclear at this time. | Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/imagespider |
|
||||||
| img2dataset | [img2dataset](https://github.com/rom1504/img2dataset) | Unclear at this time. | Scrapes images for use in LLMs. | At the discretion of img2dataset users. | Downloads large sets of images into datasets for LLM training or other purposes. |
|
| img2dataset | [img2dataset](https://github.com/rom1504/img2dataset) | Unclear at this time. | Scrapes images for use in LLMs. | At the discretion of img2dataset users. | Downloads large sets of images into datasets for LLM training or other purposes. |
|
||||||
| ISSCyberRiskCrawler | [ISS-Corporate](https://iss-cyber.com) | No | Scrapes data to train machine learning models. | No information. | Used to train machine learning based models to quantify cyber risk. |
|
| ISSCyberRiskCrawler | [ISS-Corporate](https://iss-cyber.com) | No | Scrapes data to train machine learning models. | No information. | Used to train machine learning based models to quantify cyber risk. |
|
||||||
| Kangaroo Bot | Unclear at this time. | Unclear at this time. | AI Data Scrapers | Unclear at this time. | Kangaroo Bot is used by the company Kangaroo LLM to download data to train AI models tailored to Australian language and culture. More info can be found at https://darkvisitors.com/agents/agents/kangaroo-bot |
|
| Kangaroo Bot | Unclear at this time. | Unclear at this time. | AI Data Scrapers | Unclear at this time. | Kangaroo Bot is used by the company Kangaroo LLM to download data to train AI models tailored to Australian language and culture. More info can be found at https://darkvisitors.com/agents/agents/kangaroo-bot |
|
||||||
|
| KlaviyoAIBot | [Klaviyo](https://www.klaviyo.com) | [Yes](https://help.klaviyo.com/hc/en-us/articles/40496146232219) | AI Search Crawlers | Indexes based on 'change signals' and user configuration. | Indexes content to tailor AI experiences, generate content, answers and recommendations. |
|
||||||
|
| KunatoCrawler | Unclear at this time. | Unclear at this time. | Undocumented AI Agents | Unclear at this time. | Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/kunatocrawler |
|
||||||
|
| laion\-huggingface\-processor | Unclear at this time. | Unclear at this time. | AI Data Scrapers | Unclear at this time. | Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/laion-huggingface-processor |
|
||||||
|
| LAIONDownloader | [Large-scale Artificial Intelligence Open Network](https://laion.ai/) | [No](https://laion.ai/faq/) | AI tools and models for machine learning research. | Unclear at this time. | LAIONDownloader is a bot by LAION, a non-profit organization that provides datasets, tools and models to liberate machine learning research. |
|
||||||
|
| LCC | Unclear at this time. | Unclear at this time. | AI Data Scrapers | Unclear at this time. | Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/lcc |
|
||||||
|
| LinerBot | Unclear at this time. | Unclear at this time. | AI Assistants | Unclear at this time. | LinerBot is the web crawler used by Liner AI assistant to gather information from academic sources and websites to provide accurate answers with line-by-line source citations for research and scholarly work. More info can be found at https://darkvisitors.com/agents/agents/linerbot |
|
||||||
|
| Linguee Bot | [Linguee](https://www.linguee.com) | No | AI powered translation service | Unclear at this time. | Linguee Bot is a web crawler used by Linguee to gather training data for its AI powered translation service. |
|
||||||
|
| LinkupBot | Unclear at this time. | Unclear at this time. | AI Search Crawlers | Unclear at this time. | Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/linkupbot |
|
||||||
|
| Manus\-User | Unclear at this time. | Unclear at this time. | AI Agents | Unclear at this time. | Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/manus-user |
|
||||||
| meta\-externalagent | [Meta](https://developers.facebook.com/docs/sharing/webmasters/web-crawlers) | Yes | Used to train models and improve products. | No information. | "The Meta-ExternalAgent crawler crawls the web for use cases such as training AI models or improving products by indexing content directly." |
|
| meta\-externalagent | [Meta](https://developers.facebook.com/docs/sharing/webmasters/web-crawlers) | Yes | Used to train models and improve products. | No information. | "The Meta-ExternalAgent crawler crawls the web for use cases such as training AI models or improving products by indexing content directly." |
|
||||||
| Meta\-ExternalAgent | Unclear at this time. | Unclear at this time. | AI Data Scrapers | Unclear at this time. | Meta-ExternalAgent is a web crawler used by Meta to download training data for its AI models and improve its products by indexing content directly. More info can be found at https://darkvisitors.com/agents/agents/meta-externalagent |
|
| Meta\-ExternalAgent | Unclear at this time. | Unclear at this time. | AI Data Scrapers | Unclear at this time. | Meta-ExternalAgent is a web crawler used by Meta to download training data for its AI models and improve its products by indexing content directly. More info can be found at https://darkvisitors.com/agents/agents/meta-externalagent |
|
||||||
| meta\-externalfetcher | Unclear at this time. | Unclear at this time. | AI Assistants | Unclear at this time. | Meta-ExternalFetcher is dispatched by Meta AI products in response to user prompts, when they need to fetch individual links. More info can be found at https://darkvisitors.com/agents/agents/meta-externalfetcher |
|
| meta\-externalfetcher | Unclear at this time. | Unclear at this time. | AI Assistants | Unclear at this time. | Meta-ExternalFetcher is dispatched by Meta AI products in response to user prompts, when they need to fetch individual links. More info can be found at https://darkvisitors.com/agents/agents/meta-externalfetcher |
|
||||||
| Meta\-ExternalFetcher | Unclear at this time. | Unclear at this time. | AI Assistants | Unclear at this time. | Meta-ExternalFetcher is dispatched by Meta AI products in response to user prompts, when they need to fetch individual links. More info can be found at https://darkvisitors.com/agents/agents/meta-externalfetcher |
|
| Meta\-ExternalFetcher | Unclear at this time. | Unclear at this time. | AI Assistants | Unclear at this time. | Meta-ExternalFetcher is dispatched by Meta AI products in response to user prompts, when they need to fetch individual links. More info can be found at https://darkvisitors.com/agents/agents/meta-externalfetcher |
|
||||||
|
| meta\-webindexer | [Meta](https://developers.facebook.com/docs/sharing/webmasters/web-crawlers/) | Unclear at this time. | AI Assistants | Unhinged, more than 1 per second. | As per their documentation, "The Meta-WebIndexer crawler navigates the web to improve Meta AI search result quality for users. In doing so, Meta analyzes online content to enhance the relevance and accuracy of Meta AI. Allowing Meta-WebIndexer in your robots.txt file helps us cite and link to your content in Meta AI's responses." |
|
||||||
|
| MistralAI\-User | Mistral | Unclear at this time. | AI Assistants | Unclear at this time. | MistralAI-User is an AI assistant operated by Mistral. It's not currently known to be artificially intelligent or AI-related. If you think that's incorrect or can provide more detail about its purpose, please contact us. More info can be found at https://darkvisitors.com/agents/agents/mistralai-user |
|
||||||
| MistralAI\-User/1\.0 | Mistral AI | Yes | Takes action based on user prompts. | Only when prompted by a user. | MistralAI-User is for user actions in LeChat. When users ask LeChat a question, it may visit a web page to help answer and include a link to the source in its response. |
|
| MistralAI\-User/1\.0 | Mistral AI | Yes | Takes action based on user prompts. | Only when prompted by a user. | MistralAI-User is for user actions in LeChat. When users ask LeChat a question, it may visit a web page to help answer and include a link to the source in its response. |
|
||||||
|
| MyCentralAIScraperBot | Unclear at this time. | Unclear at this time. | AI data scraper | Unclear at this time. | Operator and data use is unclear at this time. |
|
||||||
|
| netEstate Imprint Crawler | netEstate | Unclear at this time. | AI Data Scrapers | Unclear at this time. | netEstate Imprint Crawler is an AI data scraper operated by netEstate. If you think this is incorrect or can provide additional detail about its purpose, please contact us. More info can be found at https://darkvisitors.com/agents/agents/netestate-imprint-crawler |
|
||||||
|
| NotebookLM | Unclear at this time. | Unclear at this time. | AI Assistants | Unclear at this time. | NotebookLM is an AI-powered research and note-taking assistant that helps users synthesize information from their own uploaded sources, such as documents, transcripts, or web content. It can generate summaries, answer questions, and highlight key themes from the materials you provide, acting like a personalized research companion built on Google's Gemini model. NotebookLM fetches source URLs when users add them to their notebooks, enabling the AI to access and analyze those pages for context and insights. More info can be found at https://darkvisitors.com/agents/agents/google-notebooklm |
|
||||||
| NovaAct | Unclear at this time. | Unclear at this time. | AI Agents | Unclear at this time. | Nova Act is an AI agent created by Amazon that can use a web browser. It can intelligently navigate and interact with websites to complete multi-step tasks on behalf of a human user. More info can be found at https://darkvisitors.com/agents/agents/novaact |
|
| NovaAct | Unclear at this time. | Unclear at this time. | AI Agents | Unclear at this time. | Nova Act is an AI agent created by Amazon that can use a web browser. It can intelligently navigate and interact with websites to complete multi-step tasks on behalf of a human user. More info can be found at https://darkvisitors.com/agents/agents/novaact |
|
||||||
| OAI\-SearchBot | [OpenAI](https://openai.com) | [Yes](https://platform.openai.com/docs/bots) | Search result generation. | No information. | Crawls sites to surface as results in SearchGPT. |
|
| OAI\-SearchBot | [OpenAI](https://openai.com) | [Yes](https://platform.openai.com/docs/bots) | Search result generation. | No information. | Crawls sites to surface as results in SearchGPT. |
|
||||||
| omgili | [Webz.io](https://webz.io/) | [Yes](https://webz.io/blog/web-data/what-is-the-omgili-bot-and-why-is-it-crawling-your-website/) | Data is sold. | No information. | Crawls sites for APIs used by Hootsuite, Sprinklr, NetBase, and other companies. Data also sold for research purposes or LLM training. |
|
| omgili | [Webz.io](https://webz.io/) | [Yes](https://webz.io/blog/web-data/what-is-the-omgili-bot-and-why-is-it-crawling-your-website/) | Data is sold. | No information. | Crawls sites for APIs used by Hootsuite, Sprinklr, NetBase, and other companies. Data also sold for research purposes or LLM training. |
|
||||||
| omgilibot | [Webz.io](https://webz.io/) | [Yes](https://web.archive.org/web/20170704003301/http://omgili.com/Crawler.html) | Data is sold. | No information. | Legacy user agent initially used for Omgili search engine. Unknown if still used, `omgili` agent still used by Webz.io. |
|
| omgilibot | [Webz.io](https://webz.io/) | [Yes](https://web.archive.org/web/20170704003301/http://omgili.com/Crawler.html) | Data is sold. | No information. | Legacy user agent initially used for Omgili search engine. Unknown if still used, `omgili` agent still used by Webz.io. |
|
||||||
|
| OpenAI | [OpenAI](https://openai.com) | Yes | Unclear at this time. | Unclear at this time. | The purpose of this bot is unclear at this time but it is a member of OpenAI's suite of crawlers. |
|
||||||
| Operator | Unclear at this time. | Unclear at this time. | AI Agents | Unclear at this time. | Operator is an AI agent created by OpenAI that can use a web browser. It can intelligently navigate and interact with websites to complete multi-step tasks on behalf of a human user. More info can be found at https://darkvisitors.com/agents/agents/operator |
|
| Operator | Unclear at this time. | Unclear at this time. | AI Agents | Unclear at this time. | Operator is an AI agent created by OpenAI that can use a web browser. It can intelligently navigate and interact with websites to complete multi-step tasks on behalf of a human user. More info can be found at https://darkvisitors.com/agents/agents/operator |
|
||||||
| PanguBot | the Chinese company Huawei | Unclear at this time. | AI Data Scrapers | Unclear at this time. | PanguBot is a web crawler operated by the Chinese company Huawei. It's used to download training data for its multimodal LLM (Large Language Model) called PanGu. More info can be found at https://darkvisitors.com/agents/agents/pangubot |
|
| PanguBot | the Chinese company Huawei | Unclear at this time. | AI Data Scrapers | Unclear at this time. | PanguBot is a web crawler operated by the Chinese company Huawei. It's used to download training data for its multimodal LLM (Large Language Model) called PanGu. More info can be found at https://darkvisitors.com/agents/agents/pangubot |
|
||||||
| Panscient | [Panscient](https://panscient.com) | [Yes](https://panscient.com/faq.htm) | Data collection and analysis using machine learning and AI. | The Panscient web crawler will request a page at most once every second from the same domain name or the same IP address. | Compiles data on businesses and business professionals that is structured using AI and machine learning. |
|
| Panscient | [Panscient](https://panscient.com) | [Yes](https://panscient.com/faq.htm) | Data collection and analysis using machine learning and AI. | The Panscient web crawler will request a page at most once every second from the same domain name or the same IP address. | Compiles data on businesses and business professionals that is structured using AI and machine learning. |
|
||||||
|
|
@ -57,19 +101,32 @@
|
||||||
| PerplexityBot | [Perplexity](https://www.perplexity.ai/) | [Yes](https://docs.perplexity.ai/guides/bots) | Search result generation. | No information. | Crawls sites to surface as results in Perplexity. |
|
| PerplexityBot | [Perplexity](https://www.perplexity.ai/) | [Yes](https://docs.perplexity.ai/guides/bots) | Search result generation. | No information. | Crawls sites to surface as results in Perplexity. |
|
||||||
| PetalBot | [Huawei](https://huawei.com/) | Yes | Used to provide recommendations in Huawei assistant and AI search services. | No explicit frequency provided. | Operated by Huawei to provide search and AI assistant services. |
|
| PetalBot | [Huawei](https://huawei.com/) | Yes | Used to provide recommendations in Huawei assistant and AI search services. | No explicit frequency provided. | Operated by Huawei to provide search and AI assistant services. |
|
||||||
| PhindBot | [phind](https://www.phind.com/) | Unclear at this time. | AI-enhanced search engine. | No explicit frequency provided. | Company offers an AI agent that uses AI and generates extra web queries on the fly |
|
| PhindBot | [phind](https://www.phind.com/) | Unclear at this time. | AI-enhanced search engine. | No explicit frequency provided. | Company offers an AI agent that uses AI and generates extra web queries on the fly |
|
||||||
|
| Poggio\-Citations | Unclear at this time. | Unclear at this time. | AI Assistants | Unclear at this time. | Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/poggio-citations |
|
||||||
|
| Poseidon Research Crawler | [Poseidon Research](https://www.poseidonresearch.com) | Unclear at this time. | AI research crawler | No explicit frequency provided. | Lab focused on scaling the interpretability research necessary to make better AI systems possible. |
|
||||||
| QualifiedBot | [Qualified](https://www.qualified.com) | Unclear at this time. | Company offers AI agents and other related products; usage can be assumed to support said products. | No explicit frequency provided. | Operated by Qualified as part of their suite of AI product offerings. |
|
| QualifiedBot | [Qualified](https://www.qualified.com) | Unclear at this time. | Company offers AI agents and other related products; usage can be assumed to support said products. | No explicit frequency provided. | Operated by Qualified as part of their suite of AI product offerings. |
|
||||||
| QuillBot | [Quillbot](https://quillbot.com) | Unclear at this time. | Company offers AI detection, writing tools and other services. | No explicit frequency provided. | Operated by QuillBot as part of their suite of AI product offerings. |
|
| QuillBot | [Quillbot](https://quillbot.com) | Unclear at this time. | Company offers AI detection, writing tools and other services. | No explicit frequency provided. | Operated by QuillBot as part of their suite of AI product offerings. |
|
||||||
| quillbot\.com | [Quillbot](https://quillbot.com) | Unclear at this time. | Company offers AI detection, writing tools and other services. | No explicit frequency provided. | Operated by QuillBot as part of their suite of AI product offerings. |
|
| quillbot\.com | [Quillbot](https://quillbot.com) | Unclear at this time. | Company offers AI detection, writing tools and other services. | No explicit frequency provided. | Operated by QuillBot as part of their suite of AI product offerings. |
|
||||||
| SBIntuitionsBot | [SB Intuitions](https://www.sbintuitions.co.jp/en/) | [Yes](https://www.sbintuitions.co.jp/en/bot/) | Uses data gathered in AI development and information analysis. | No information. | AI development and information analysis |
|
| SBIntuitionsBot | [SB Intuitions](https://www.sbintuitions.co.jp/en/) | [Yes](https://www.sbintuitions.co.jp/en/bot/) | Uses data gathered in AI development and information analysis. | No information. | AI development and information analysis |
|
||||||
| Scrapy | [Zyte](https://www.zyte.com) | Unclear at this time. | Scrapes data for a variety of uses including training AI. | No information. | "AI and machine learning applications often need large amounts of quality data, and web data extraction is a fast, efficient way to build structured data sets." |
|
| Scrapy | [Zyte](https://www.zyte.com) | Unclear at this time. | Scrapes data for a variety of uses including training AI. | No information. | "AI and machine learning applications often need large amounts of quality data, and web data extraction is a fast, efficient way to build structured data sets." |
|
||||||
| SemrushBot\-OCOB | [Semrush](https://www.semrush.com/) | [Yes](https://www.semrush.com/bot/) | Crawls your site for ContentShake AI tool. | Roughly once every 10 seconds. | You enter one text (on-demand) and we will make suggestions on it (the tool uses AI but we are not actively crawling the web, you need to manually enter one text/URL). |
|
| SemrushBot\-OCOB | [Semrush](https://www.semrush.com/) | [Yes](https://www.semrush.com/bot/) | Crawls your site for ContentShake AI tool. | Roughly once every 10 seconds. | Data collected is used for the ContentShake AI tool reports. |
|
||||||
| SemrushBot\-SWA | [Semrush](https://www.semrush.com/) | [Yes](https://www.semrush.com/bot/) | Checks URLs on your site for SWA tool. | Roughly once every 10 seconds. | You enter one text (on-demand) and we will make suggestions on it (the tool uses AI but we are not actively crawling the web, you need to manually enter one text/URL). |
|
| SemrushBot\-SWA | [Semrush](https://www.semrush.com/) | [Yes](https://www.semrush.com/bot/) | Checks URLs on your site for SEO Writing Assistant. | Roughly once every 10 seconds. | Data collected is used for the SEO Writing Assistant tool to check if URL is accessible. |
|
||||||
|
| ShapBot | [Parallel](https://parallel.ai) | [Yes](https://docs.parallel.ai/features/crawler) | Collects data for Parallel's web APIs. | Unclear at this time. | ShapBot helps discover and index websites for Parallel's web APIs. |
|
||||||
| Sidetrade indexer bot | [Sidetrade](https://www.sidetrade.com) | Unclear at this time. | Extracts data for a variety of uses including training AI. | No information. | AI product training. |
|
| Sidetrade indexer bot | [Sidetrade](https://www.sidetrade.com) | Unclear at this time. | Extracts data for a variety of uses including training AI. | No information. | AI product training. |
|
||||||
|
| Spider | Unclear at this time. | Unclear at this time. | AI Data Scrapers | Unclear at this time. | Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/spider |
|
||||||
|
| TavilyBot | Unclear at this time. | Unclear at this time. | AI Assistants | Unclear at this time. | Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/tavilybot |
|
||||||
|
| TerraCotta | [Ceramic AI](https://ceramic.ai/) | [Yes](https://github.com/CeramicTeam/CeramicTerracotta) | AI Agents | Unclear at this time. | Downloads data to train LLMs. |
|
||||||
|
| Thinkbot | [Thinkbot](https://www.thinkbot.agency) | No | Insights on AI integration and automation. | Unclear at this time. | Collects data for analysis on AI usage and automation. |
|
||||||
| TikTokSpider | ByteDance | Unclear at this time. | LLM training. | Unclear at this time. | Downloads data to train LLMs, as per Bytespider. |
|
| TikTokSpider | ByteDance | Unclear at this time. | LLM training. | Unclear at this time. | Downloads data to train LLMs, as per Bytespider. |
|
||||||
| Timpibot | [Timpi](https://timpi.io) | Unclear at this time. | Scrapes data for use in training LLMs. | No information. | Makes data available for training AI models. |
|
| Timpibot | [Timpi](https://timpi.io) | Unclear at this time. | Scrapes data for use in training LLMs. | No information. | Makes data available for training AI models. |
|
||||||
|
| TwinAgent | Unclear at this time. | Unclear at this time. | AI Agents | Unclear at this time. | Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/twinagent |
|
||||||
| VelenPublicWebCrawler | [Velen Crawler](https://velen.io) | [Yes](https://velen.io) | Scrapes data for business data sets and machine learning models. | No information. | "Our goal with this crawler is to build business datasets and machine learning models to better understand the web." |
|
| VelenPublicWebCrawler | [Velen Crawler](https://velen.io) | [Yes](https://velen.io) | Scrapes data for business data sets and machine learning models. | No information. | "Our goal with this crawler is to build business datasets and machine learning models to better understand the web." |
|
||||||
|
| WARDBot | WEBSPARK | Unclear at this time. | AI Data Scrapers | Unclear at this time. | WARDBot is an AI data scraper operated by WEBSPARK. It's not currently known to be artificially intelligent or AI-related. If you think that's incorrect or can provide more detail about its purpose, please contact us. More info can be found at https://darkvisitors.com/agents/agents/wardbot |
|
||||||
| Webzio\-Extended | Unclear at this time. | Unclear at this time. | AI Data Scrapers | Unclear at this time. | Webzio-Extended is a web crawler used by Webz.io to maintain a repository of web crawl data that it sells to other companies, including those using it to train AI models. More info can be found at https://darkvisitors.com/agents/agents/webzio-extended |
|
| Webzio\-Extended | Unclear at this time. | Unclear at this time. | AI Data Scrapers | Unclear at this time. | Webzio-Extended is a web crawler used by Webz.io to maintain a repository of web crawl data that it sells to other companies, including those using it to train AI models. More info can be found at https://darkvisitors.com/agents/agents/webzio-extended |
|
||||||
|
| webzio\-extended | Unclear at this time. | Unclear at this time. | AI Data Scrapers | Unclear at this time. | Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/webzio-extended |
|
||||||
| wpbot | [QuantumCloud](https://www.quantumcloud.com) | Unclear at this time; opt out provided via [Google Form](https://forms.gle/ajBaxygz9jSR8p8G9) | Live chat support and lead generation. | Unclear at this time. | wpbot is used to support the functionality of the AI Chatbot for WordPress plugin. It supports the use of customer models, data collection and customer support. |
|
| wpbot | [QuantumCloud](https://www.quantumcloud.com) | Unclear at this time; opt out provided via [Google Form](https://forms.gle/ajBaxygz9jSR8p8G9) | Live chat support and lead generation. | Unclear at this time. | wpbot is used to support the functionality of the AI Chatbot for WordPress plugin. It supports the use of customer models, data collection and customer support. |
|
||||||
|
| WRTNBot | Unclear at this time. | Unclear at this time. | Undocumented AI Agents | Unclear at this time. | Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/wrtnbot |
|
||||||
|
| YaK | [Meltwater](https://www.meltwater.com/en/suite/consumer-intelligence) | Unclear at this time. | According to the [Meltwater Consumer Intelligence page](https://www.meltwater.com/en/suite/consumer-intelligence) 'By applying AI, data science, and market research expertise to a live feed of global data sources, we transform unstructured data into actionable insights allowing better decision-making'. | Unclear at this time. | Retrieves data used for Meltwater's AI enabled consumer intelligence suite |
|
||||||
| YandexAdditional | [Yandex](https://yandex.ru) | [Yes](https://yandex.ru/support/webmaster/en/search-appearance/fast.html?lang=en) | Scrapes/analyzes data for the YandexGPT LLM. | No information. | Retrieves data used for YandexGPT quick answers features. |
|
| YandexAdditional | [Yandex](https://yandex.ru) | [Yes](https://yandex.ru/support/webmaster/en/search-appearance/fast.html?lang=en) | Scrapes/analyzes data for the YandexGPT LLM. | No information. | Retrieves data used for YandexGPT quick answers features. |
|
||||||
| YandexAdditionalBot | [Yandex](https://yandex.ru) | [Yes](https://yandex.ru/support/webmaster/en/search-appearance/fast.html?lang=en) | Scrapes/analyzes data for the YandexGPT LLM. | No information. | Retrieves data used for YandexGPT quick answers features. |
|
| YandexAdditionalBot | [Yandex](https://yandex.ru) | [Yes](https://yandex.ru/support/webmaster/en/search-appearance/fast.html?lang=en) | Scrapes/analyzes data for the YandexGPT LLM. | No information. | Retrieves data used for YandexGPT quick answers features. |
|
||||||
| YouBot | [You](https://about.you.com/youchat/) | [Yes](https://about.you.com/youbot/) | Scrapes data for search engine and LLMs. | No information. | Retrieves data used for You.com web search engine and LLMs. |
|
| YouBot | [You](https://about.you.com/youchat/) | [Yes](https://about.you.com/youbot/) | Scrapes data for search engine and LLMs. | No information. | Retrieves data used for You.com web search engine and LLMs. |
|
||||||
|
| ZanistaBot | Unclear at this time. | Unclear at this time. | AI Search Crawlers | Unclear at this time. | Description unavailable from darkvisitors.com More info can be found at https://darkvisitors.com/agents/agents/zanistabot |
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue