table.csv?_stream=1 to download all rows - refs #266
This option causes Datasette to serve ALL rows in the table by internally following the _next= pagination links and serving everything out as a stream. Also added a new config option, allow_csv_stream, which can be used to disable this feature.
parent 5a0a82faf9
commit 619a9ddb33
5 changed files with 69 additions and 44 deletions
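To make the pagination-following idea concrete, here is a minimal sketch written as an external client rather than Datasette's internal code: it repeatedly fetches a table's JSON endpoint, follows the "next" token in each response, and emits a chunk of CSV per page. The function name and localhost URL are illustrative assumptions, not part of this commit.

import csv
import io
import json
import urllib.parse
import urllib.request

def stream_table_as_csv(table_url):
    # Sketch of the mechanism described above, not Datasette's actual
    # internals: request pages of JSON, follow the "next" pagination
    # token, and yield a chunk of CSV text for each page.
    next_token = None
    header_written = False
    while True:
        params = {"_shape": "objects"}
        if next_token:
            params["_next"] = next_token
        url = "{}.json?{}".format(table_url, urllib.parse.urlencode(params))
        with urllib.request.urlopen(url) as response:
            page = json.load(response)
        out = io.StringIO()
        writer = csv.writer(out)
        for row in page["rows"]:
            if not header_written:
                writer.writerow(row.keys())
                header_written = True
            writer.writerow(row.values())
        yield out.getvalue()
        next_token = page.get("next")
        if not next_token:
            break

# Usage (assumes a local Datasette serving the fixtures database):
# for chunk in stream_table_as_csv(
#         "http://localhost:8001/fixtures/compound_three_primary_keys"):
#     print(chunk, end="")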
@@ -901,6 +901,7 @@ def test_config_json(app_client):
         "default_cache_ttl": 365 * 24 * 60 * 60,
         "num_sql_threads": 3,
         "cache_size_kb": 0,
+        "allow_csv_stream": True,
     } == response.json
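The default of True means streaming is on unless explicitly disabled. A quick way to confirm the live value is the /-/config.json introspection endpoint the test above exercises; a sketch assuming a local instance on port 8001:

import json
import urllib.request

# Hypothetical local Datasette instance; adjust host/port as needed.
with urllib.request.urlopen("http://localhost:8001/-/config.json") as response:
    config = json.load(response)

print(config["allow_csv_stream"])  # True unless disabled at startup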
@@ -59,3 +59,16 @@ def test_table_csv_download(app_client):
     assert 'text/csv; charset=utf-8' == response.headers['Content-Type']
     expected_disposition = 'attachment; filename="simple_primary_key.csv"'
     assert expected_disposition == response.headers['Content-Disposition']
+
+
+def test_table_csv_stream(app_client):
+    # Without _stream should return header + 100 rows:
+    response = app_client.get(
+        "/fixtures/compound_three_primary_keys.csv?_size=max"
+    )
+    assert 101 == len([b for b in response.body.split(b"\r\n") if b])
+    # With _stream=1 should return header + 1001 rows
+    response = app_client.get(
+        "/fixtures/compound_three_primary_keys.csv?_stream=1"
+    )
+    assert 1002 == len([b for b in response.body.split(b"\r\n") if b])
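The same check works outside the test client against a running server; a sketch assuming the fixtures database is being served on localhost:8001:

import urllib.request

# Mirrors the assertion in test_table_csv_stream above (hypothetical URL).
# Reading the whole body defeats streaming, but suffices for counting rows.
url = "http://localhost:8001/fixtures/compound_three_primary_keys.csv?_stream=1"
with urllib.request.urlopen(url) as response:
    lines = [line for line in response.read().split(b"\r\n") if line]

print(len(lines))  # header + 1001 data rows == 1002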