2018-05-13 12:58:28 +00:00
|
|
|
import asyncio
|
2018-06-15 06:51:23 +00:00
|
|
|
import csv
|
2018-05-13 12:58:28 +00:00
|
|
|
import json
|
|
|
|
import re
|
|
|
|
import sqlite3
|
|
|
|
import time
|
2018-06-15 06:51:23 +00:00
|
|
|
import urllib
|
2018-05-13 12:58:28 +00:00
|
|
|
|
|
|
|
import pint
|
2018-05-13 12:44:22 +00:00
|
|
|
from sanic import response
|
|
|
|
from sanic.exceptions import NotFound
|
2018-05-13 12:58:28 +00:00
|
|
|
from sanic.views import HTTPMethodView
|
|
|
|
|
2018-05-13 12:44:22 +00:00
|
|
|
from datasette import __version__
|
|
|
|
from datasette.utils import (
|
|
|
|
CustomJSONEncoder,
|
2018-05-18 06:07:45 +00:00
|
|
|
InterruptedError,
|
2018-05-13 12:44:22 +00:00
|
|
|
InvalidSql,
|
|
|
|
path_from_row_pks,
|
|
|
|
path_with_added_args,
|
2018-06-15 06:51:23 +00:00
|
|
|
path_with_format,
|
|
|
|
resolve_table_and_format,
|
2018-05-13 12:58:28 +00:00
|
|
|
to_css_class
|
2018-05-13 12:44:22 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
# Unit registry for pint; presumably used by table views elsewhere in this
# module/package for unit-aware filtering -- not referenced in this chunk.
ureg = pint.UnitRegistry()

# Number of leading characters of each database's content hash that get
# embedded in URLs (see resolve_db_name: info["hash"][:HASH_LENGTH]).
HASH_LENGTH = 7
|
|
|
|
|
|
|
|
|
|
|
|
class DatasetteError(Exception):
    """Application-level error carrying extra context for the error page.

    NOTE(review): the ``messagge_is_html`` spelling (sic) is the keyword
    callers actually pass, so it is deliberately preserved.  The ``template``
    parameter is accepted but not stored -- presumably reserved for future
    use; confirm against callers before removing.
    """

    def __init__(self, message, title=None, error_dict=None, status=500, template=None, messagge_is_html=False):
        # HTTP status code to respond with (default: internal server error)
        self.status = status
        # Optional short title shown above the message
        self.title = title
        self.message = message
        # When true, the message is trusted HTML rather than plain text
        self.messagge_is_html = messagge_is_html
        # Extra per-field error details; never None so callers can iterate
        self.error_dict = error_dict or {}
|
2018-05-13 12:44:22 +00:00
|
|
|
|
|
|
|
|
|
|
|
class RenderMixin(HTTPMethodView):
    """Mixin adding Jinja template rendering on top of Sanic's HTTPMethodView."""

    def render(self, templates, **context):
        """Render the first existing template from *templates* to an HTML response."""
        template = self.jinja_env.select_template(templates)
        # Record every candidate template, prefixing the one that was
        # actually selected with "*" so it can be surfaced for debugging.
        select_templates = []
        for template_name in templates:
            prefix = "*" if template_name == template.name else ""
            select_templates.append("{}{}".format(prefix, template_name))
        body = template.render(
            {
                **context,
                **{
                    "app_css_hash": self.ds.app_css_hash(),
                    "select_templates": select_templates,
                    # Expose zip() so templates can iterate columns/rows in lockstep
                    "zip": zip,
                }
            }
        )
        return response.html(body)
|
|
|
|
|
|
|
|
|
|
|
|
class BaseView(RenderMixin):
    """Common plumbing shared by all datasette views."""

    # Matches :name style named parameters inside SQL strings
    re_named_parameter = re.compile(":([a-zA-Z0-9_]+)")

    def __init__(self, datasette):
        """Cache frequently-used attributes off the Datasette application object."""
        self.ds = datasette
        self.files = datasette.files
        self.jinja_env = datasette.jinja_env
        self.executor = datasette.executor
        self.page_size = datasette.page_size
        self.max_returned_rows = datasette.max_returned_rows
|
|
|
|
|
|
|
|
def table_metadata(self, database, table):
|
|
|
|
"Fetch table-specific metadata."
|
2018-05-13 12:55:15 +00:00
|
|
|
return self.ds.metadata.get("databases", {}).get(database, {}).get(
|
|
|
|
"tables", {}
|
|
|
|
).get(
|
|
|
|
table, {}
|
|
|
|
)
|
2018-05-13 12:44:22 +00:00
|
|
|
|
|
|
|
def options(self, request, *args, **kwargs):
|
2018-05-13 12:55:15 +00:00
|
|
|
r = response.text("ok")
|
2018-05-13 12:44:22 +00:00
|
|
|
if self.ds.cors:
|
2018-05-13 12:55:15 +00:00
|
|
|
r.headers["Access-Control-Allow-Origin"] = "*"
|
2018-05-13 12:44:22 +00:00
|
|
|
return r
|
|
|
|
|
|
|
|
def redirect(self, request, path, forward_querystring=True):
|
2018-05-13 12:55:15 +00:00
|
|
|
if request.query_string and "?" not in path and forward_querystring:
|
|
|
|
path = "{}?{}".format(path, request.query_string)
|
2018-05-13 12:44:22 +00:00
|
|
|
r = response.redirect(path)
|
2018-05-13 12:55:15 +00:00
|
|
|
r.headers["Link"] = "<{}>; rel=preload".format(path)
|
2018-05-13 12:44:22 +00:00
|
|
|
if self.ds.cors:
|
2018-05-13 12:55:15 +00:00
|
|
|
r.headers["Access-Control-Allow-Origin"] = "*"
|
2018-05-13 12:44:22 +00:00
|
|
|
return r
|
|
|
|
|
|
|
|
def resolve_db_name(self, db_name, **kwargs):
|
|
|
|
databases = self.ds.inspect()
|
|
|
|
hash = None
|
|
|
|
name = None
|
2018-05-13 12:55:15 +00:00
|
|
|
if "-" in db_name:
|
2018-05-13 12:44:22 +00:00
|
|
|
# Might be name-and-hash, or might just be
|
|
|
|
# a name with a hyphen in it
|
2018-05-13 12:55:15 +00:00
|
|
|
name, hash = db_name.rsplit("-", 1)
|
2018-05-13 12:44:22 +00:00
|
|
|
if name not in databases:
|
|
|
|
# Try the whole name
|
|
|
|
name = db_name
|
|
|
|
hash = None
|
|
|
|
else:
|
|
|
|
name = db_name
|
|
|
|
# Verify the hash
|
|
|
|
try:
|
|
|
|
info = databases[name]
|
|
|
|
except KeyError:
|
2018-05-13 12:55:15 +00:00
|
|
|
raise NotFound("Database not found: {}".format(name))
|
|
|
|
|
|
|
|
expected = info["hash"][:HASH_LENGTH]
|
2018-05-13 12:44:22 +00:00
|
|
|
if expected != hash:
|
2018-06-15 06:51:23 +00:00
|
|
|
if "table_and_format" in kwargs:
|
|
|
|
table, _format = resolve_table_and_format(
|
|
|
|
table_and_format=urllib.parse.unquote_plus(
|
|
|
|
kwargs["table_and_format"]
|
|
|
|
),
|
|
|
|
table_exists=lambda t: self.ds.table_exists(name, t)
|
|
|
|
)
|
|
|
|
kwargs["table"] = table
|
|
|
|
if _format:
|
|
|
|
kwargs["as_format"] = ".{}".format(_format)
|
2018-05-13 12:55:15 +00:00
|
|
|
should_redirect = "/{}-{}".format(name, expected)
|
|
|
|
if "table" in kwargs:
|
2018-06-15 06:51:23 +00:00
|
|
|
should_redirect += "/" + urllib.parse.quote_plus(kwargs["table"])
|
2018-05-13 12:55:15 +00:00
|
|
|
if "pk_path" in kwargs:
|
|
|
|
should_redirect += "/" + kwargs["pk_path"]
|
2018-06-15 06:51:23 +00:00
|
|
|
if "as_format" in kwargs:
|
|
|
|
should_redirect += kwargs["as_format"]
|
2018-05-13 12:55:15 +00:00
|
|
|
if "as_db" in kwargs:
|
|
|
|
should_redirect += kwargs["as_db"]
|
2018-05-13 12:44:22 +00:00
|
|
|
return name, expected, should_redirect
|
2018-05-13 12:55:15 +00:00
|
|
|
|
2018-05-13 12:44:22 +00:00
|
|
|
return name, expected, None
|
|
|
|
|
|
|
|
def get_templates(self, database, table=None):
|
|
|
|
assert NotImplemented
|
|
|
|
|
|
|
|
async def get(self, request, db_name, **kwargs):
|
|
|
|
name, hash, should_redirect = self.resolve_db_name(db_name, **kwargs)
|
|
|
|
if should_redirect:
|
|
|
|
return self.redirect(request, should_redirect)
|
2018-05-13 12:55:15 +00:00
|
|
|
|
2018-05-13 12:44:22 +00:00
|
|
|
return await self.view_get(request, name, hash, **kwargs)
|
|
|
|
|
2018-06-15 06:51:23 +00:00
|
|
|
async def as_csv(self, request, name, hash, **kwargs):
|
2018-05-13 12:44:22 +00:00
|
|
|
try:
|
2018-06-15 06:51:23 +00:00
|
|
|
response_or_template_contexts = await self.data(
|
|
|
|
request, name, hash, **kwargs
|
|
|
|
)
|
|
|
|
if isinstance(response_or_template_contexts, response.HTTPResponse):
|
|
|
|
return response_or_template_contexts
|
|
|
|
|
|
|
|
else:
|
|
|
|
data, extra_template_data, templates = response_or_template_contexts
|
|
|
|
except (sqlite3.OperationalError, InvalidSql) as e:
|
|
|
|
raise DatasetteError(str(e), title="Invalid SQL", status=400)
|
|
|
|
|
|
|
|
except (sqlite3.OperationalError) as e:
|
|
|
|
raise DatasetteError(str(e))
|
|
|
|
|
|
|
|
except DatasetteError:
|
|
|
|
raise
|
|
|
|
# Convert rows and columns to CSV
|
?_labels= and ?_label=COL to expand foreign keys in JSON/CSV
These new querystring arguments can be used to request expanded foreign keys
in both JSON and CSV formats.
?_labels=on turns on expansions for ALL foreign key columns
?_label=COLUMN1&_label=COLUMN2 can be used to pick specific columns to expand
e.g. `Street_Tree_List.json?_label=qSpecies&_label=qLegalStatus`
{
"rowid": 233,
"TreeID": 121240,
"qLegalStatus": {
"value" 2,
"label": "Private"
}
"qSpecies": {
"value": 16,
"label": "Sycamore"
}
"qAddress": "91 Commonwealth Ave",
...
}
The labels option also works for the HTML and CSV views.
HTML defaults to `?_labels=on`, so if you pass `?_labels=off` you can disable
foreign key expansion entirely - or you can use `?_label=COLUMN` to request
just specific columns.
If you expand labels on CSV you get additional columns in the output:
`/Street_Tree_List.csv?_label=qLegalStatus`
rowid,TreeID,qLegalStatus,qLegalStatus_label...
1,141565,1,Permitted Site...
2,232565,2,Undocumented...
I also refactored the existing foreign key expansion code.
Closes #233. Refs #266.
2018-06-16 22:18:57 +00:00
|
|
|
headings = data["columns"]
|
|
|
|
# if there are columns_expanded we need to add additional headings
|
|
|
|
columns_expanded = set(data.get("columns_expanded") or [])
|
|
|
|
if columns_expanded:
|
|
|
|
headings = []
|
|
|
|
for column in data["columns"]:
|
|
|
|
headings.append(column)
|
|
|
|
if column in columns_expanded:
|
|
|
|
headings.append("{}_label".format(column))
|
|
|
|
|
2018-06-15 06:51:23 +00:00
|
|
|
async def stream_fn(r):
|
|
|
|
writer = csv.writer(r)
|
?_labels= and ?_label=COL to expand foreign keys in JSON/CSV
These new querystring arguments can be used to request expanded foreign keys
in both JSON and CSV formats.
?_labels=on turns on expansions for ALL foreign key columns
?_label=COLUMN1&_label=COLUMN2 can be used to pick specific columns to expand
e.g. `Street_Tree_List.json?_label=qSpecies&_label=qLegalStatus`
{
"rowid": 233,
"TreeID": 121240,
"qLegalStatus": {
"value" 2,
"label": "Private"
}
"qSpecies": {
"value": 16,
"label": "Sycamore"
}
"qAddress": "91 Commonwealth Ave",
...
}
The labels option also works for the HTML and CSV views.
HTML defaults to `?_labels=on`, so if you pass `?_labels=off` you can disable
foreign key expansion entirely - or you can use `?_label=COLUMN` to request
just specific columns.
If you expand labels on CSV you get additional columns in the output:
`/Street_Tree_List.csv?_label=qLegalStatus`
rowid,TreeID,qLegalStatus,qLegalStatus_label...
1,141565,1,Permitted Site...
2,232565,2,Undocumented...
I also refactored the existing foreign key expansion code.
Closes #233. Refs #266.
2018-06-16 22:18:57 +00:00
|
|
|
writer.writerow(headings)
|
2018-06-15 06:51:23 +00:00
|
|
|
for row in data["rows"]:
|
?_labels= and ?_label=COL to expand foreign keys in JSON/CSV
These new querystring arguments can be used to request expanded foreign keys
in both JSON and CSV formats.
?_labels=on turns on expansions for ALL foreign key columns
?_label=COLUMN1&_label=COLUMN2 can be used to pick specific columns to expand
e.g. `Street_Tree_List.json?_label=qSpecies&_label=qLegalStatus`
{
"rowid": 233,
"TreeID": 121240,
"qLegalStatus": {
"value" 2,
"label": "Private"
}
"qSpecies": {
"value": 16,
"label": "Sycamore"
}
"qAddress": "91 Commonwealth Ave",
...
}
The labels option also works for the HTML and CSV views.
HTML defaults to `?_labels=on`, so if you pass `?_labels=off` you can disable
foreign key expansion entirely - or you can use `?_label=COLUMN` to request
just specific columns.
If you expand labels on CSV you get additional columns in the output:
`/Street_Tree_List.csv?_label=qLegalStatus`
rowid,TreeID,qLegalStatus,qLegalStatus_label...
1,141565,1,Permitted Site...
2,232565,2,Undocumented...
I also refactored the existing foreign key expansion code.
Closes #233. Refs #266.
2018-06-16 22:18:57 +00:00
|
|
|
if not columns_expanded:
|
|
|
|
# Simple path
|
|
|
|
writer.writerow(row)
|
|
|
|
else:
|
|
|
|
# Look for {"value": "label": } dicts and expand
|
|
|
|
new_row = []
|
|
|
|
for cell in row:
|
|
|
|
if isinstance(cell, dict):
|
|
|
|
new_row.append(cell["value"])
|
|
|
|
new_row.append(cell["label"])
|
|
|
|
else:
|
|
|
|
new_row.append(cell)
|
|
|
|
writer.writerow(new_row)
|
2018-06-15 06:51:23 +00:00
|
|
|
|
|
|
|
content_type = "text/plain; charset=utf-8"
|
|
|
|
headers = {}
|
|
|
|
if request.args.get("_dl", None):
|
|
|
|
content_type = "text/csv; charset=utf-8"
|
|
|
|
disposition = 'attachment; filename="{}.csv"'.format(
|
|
|
|
kwargs.get('table', name)
|
|
|
|
)
|
|
|
|
headers["Content-Disposition"] = disposition
|
|
|
|
|
|
|
|
return response.stream(
|
|
|
|
stream_fn,
|
|
|
|
headers=headers,
|
|
|
|
content_type=content_type
|
|
|
|
)
|
|
|
|
|
|
|
|
async def view_get(self, request, name, hash, **kwargs):
|
|
|
|
# If ?_format= is provided, use that as the format
|
|
|
|
_format = request.args.get("_format", None)
|
|
|
|
if not _format:
|
|
|
|
_format = (kwargs.pop("as_format", None) or "").lstrip(".")
|
|
|
|
if "table_and_format" in kwargs:
|
|
|
|
table, _ext_format = resolve_table_and_format(
|
|
|
|
table_and_format=urllib.parse.unquote_plus(
|
|
|
|
kwargs["table_and_format"]
|
|
|
|
),
|
|
|
|
table_exists=lambda t: self.ds.table_exists(name, t)
|
|
|
|
)
|
|
|
|
_format = _format or _ext_format
|
|
|
|
kwargs["table"] = table
|
|
|
|
del kwargs["table_and_format"]
|
|
|
|
|
|
|
|
if _format == "csv":
|
|
|
|
return await self.as_csv(request, name, hash, **kwargs)
|
|
|
|
|
?_labels= and ?_label=COL to expand foreign keys in JSON/CSV
These new querystring arguments can be used to request expanded foreign keys
in both JSON and CSV formats.
?_labels=on turns on expansions for ALL foreign key columns
?_label=COLUMN1&_label=COLUMN2 can be used to pick specific columns to expand
e.g. `Street_Tree_List.json?_label=qSpecies&_label=qLegalStatus`
{
"rowid": 233,
"TreeID": 121240,
"qLegalStatus": {
"value" 2,
"label": "Private"
}
"qSpecies": {
"value": 16,
"label": "Sycamore"
}
"qAddress": "91 Commonwealth Ave",
...
}
The labels option also works for the HTML and CSV views.
HTML defaults to `?_labels=on`, so if you pass `?_labels=off` you can disable
foreign key expansion entirely - or you can use `?_label=COLUMN` to request
just specific columns.
If you expand labels on CSV you get additional columns in the output:
`/Street_Tree_List.csv?_label=qLegalStatus`
rowid,TreeID,qLegalStatus,qLegalStatus_label...
1,141565,1,Permitted Site...
2,232565,2,Undocumented...
I also refactored the existing foreign key expansion code.
Closes #233. Refs #266.
2018-06-16 22:18:57 +00:00
|
|
|
if _format is None:
|
|
|
|
# HTML views default to expanding all forign key labels
|
|
|
|
kwargs['default_labels'] = True
|
|
|
|
|
2018-05-13 12:44:22 +00:00
|
|
|
extra_template_data = {}
|
|
|
|
start = time.time()
|
|
|
|
status_code = 200
|
|
|
|
templates = []
|
|
|
|
try:
|
|
|
|
response_or_template_contexts = await self.data(
|
|
|
|
request, name, hash, **kwargs
|
|
|
|
)
|
|
|
|
if isinstance(response_or_template_contexts, response.HTTPResponse):
|
|
|
|
return response_or_template_contexts
|
2018-05-13 12:55:15 +00:00
|
|
|
|
2018-05-13 12:44:22 +00:00
|
|
|
else:
|
|
|
|
data, extra_template_data, templates = response_or_template_contexts
|
2018-05-18 06:07:45 +00:00
|
|
|
except InterruptedError as e:
|
2018-05-28 21:24:19 +00:00
|
|
|
raise DatasetteError("""
|
|
|
|
SQL query took too long. The time limit is controlled by the
|
|
|
|
<a href="https://datasette.readthedocs.io/en/stable/config.html#sql-time-limit-ms">sql_time_limit_ms</a>
|
|
|
|
configuration option.
|
|
|
|
""", title="SQL Interrupted", status=400, messagge_is_html=True)
|
2018-05-13 12:44:22 +00:00
|
|
|
except (sqlite3.OperationalError, InvalidSql) as e:
|
2018-05-13 12:55:15 +00:00
|
|
|
raise DatasetteError(str(e), title="Invalid SQL", status=400)
|
|
|
|
|
2018-05-13 12:44:22 +00:00
|
|
|
except (sqlite3.OperationalError) as e:
|
|
|
|
raise DatasetteError(str(e))
|
2018-05-13 12:55:15 +00:00
|
|
|
|
2018-05-13 12:44:22 +00:00
|
|
|
except DatasetteError:
|
|
|
|
raise
|
2018-05-13 12:55:15 +00:00
|
|
|
|
2018-05-13 12:44:22 +00:00
|
|
|
end = time.time()
|
2018-05-13 12:55:15 +00:00
|
|
|
data["query_ms"] = (end - start) * 1000
|
|
|
|
for key in ("source", "source_url", "license", "license_url"):
|
2018-05-13 12:44:22 +00:00
|
|
|
value = self.ds.metadata.get(key)
|
|
|
|
if value:
|
|
|
|
data[key] = value
|
2018-06-15 06:51:23 +00:00
|
|
|
if _format in ("json", "jsono"):
|
2018-05-13 12:44:22 +00:00
|
|
|
# Special case for .jsono extension - redirect to _shape=objects
|
2018-06-15 06:51:23 +00:00
|
|
|
if _format == "jsono":
|
2018-05-13 12:44:22 +00:00
|
|
|
return self.redirect(
|
|
|
|
request,
|
|
|
|
path_with_added_args(
|
|
|
|
request,
|
2018-05-13 12:55:15 +00:00
|
|
|
{"_shape": "objects"},
|
|
|
|
path=request.path.rsplit(".jsono", 1)[0] + ".json",
|
2018-05-13 12:44:22 +00:00
|
|
|
),
|
2018-05-13 12:55:15 +00:00
|
|
|
forward_querystring=False,
|
2018-05-13 12:44:22 +00:00
|
|
|
)
|
2018-05-13 12:55:15 +00:00
|
|
|
|
2018-05-28 18:08:39 +00:00
|
|
|
# Handle the _json= parameter which may modify data["rows"]
|
|
|
|
json_cols = []
|
|
|
|
if "_json" in request.args:
|
|
|
|
json_cols = request.args["_json"]
|
|
|
|
if json_cols and "rows" in data and "columns" in data:
|
|
|
|
data["rows"] = convert_specific_columns_to_json(
|
|
|
|
data["rows"], data["columns"], json_cols,
|
|
|
|
)
|
|
|
|
|
2018-05-13 12:44:22 +00:00
|
|
|
# Deal with the _shape option
|
2018-05-13 12:55:15 +00:00
|
|
|
shape = request.args.get("_shape", "arrays")
|
2018-05-27 00:32:15 +00:00
|
|
|
if shape == "arrayfirst":
|
|
|
|
data = [row[0] for row in data["rows"]]
|
|
|
|
elif shape in ("objects", "object", "array"):
|
2018-05-13 12:55:15 +00:00
|
|
|
columns = data.get("columns")
|
|
|
|
rows = data.get("rows")
|
2018-05-13 12:44:22 +00:00
|
|
|
if rows and columns:
|
2018-05-13 12:55:15 +00:00
|
|
|
data["rows"] = [dict(zip(columns, row)) for row in rows]
|
|
|
|
if shape == "object":
|
2018-05-13 12:44:22 +00:00
|
|
|
error = None
|
2018-05-13 12:55:15 +00:00
|
|
|
if "primary_keys" not in data:
|
|
|
|
error = "_shape=object is only available on tables"
|
2018-05-13 12:44:22 +00:00
|
|
|
else:
|
2018-05-13 12:55:15 +00:00
|
|
|
pks = data["primary_keys"]
|
2018-05-13 12:44:22 +00:00
|
|
|
if not pks:
|
2018-05-13 12:55:15 +00:00
|
|
|
error = "_shape=object not available for tables with no primary keys"
|
2018-05-13 12:44:22 +00:00
|
|
|
else:
|
|
|
|
object_rows = {}
|
2018-05-13 12:55:15 +00:00
|
|
|
for row in data["rows"]:
|
2018-05-13 12:44:22 +00:00
|
|
|
pk_string = path_from_row_pks(row, pks, not pks)
|
|
|
|
object_rows[pk_string] = row
|
|
|
|
data = object_rows
|
|
|
|
if error:
|
|
|
|
data = {
|
2018-05-13 12:55:15 +00:00
|
|
|
"ok": False,
|
|
|
|
"error": error,
|
|
|
|
"database": name,
|
|
|
|
"database_hash": hash,
|
2018-05-13 12:44:22 +00:00
|
|
|
}
|
2018-05-13 12:55:15 +00:00
|
|
|
elif shape == "array":
|
|
|
|
data = data["rows"]
|
|
|
|
elif shape == "arrays":
|
2018-05-13 12:44:22 +00:00
|
|
|
pass
|
|
|
|
else:
|
|
|
|
status_code = 400
|
|
|
|
data = {
|
2018-05-13 12:55:15 +00:00
|
|
|
"ok": False,
|
|
|
|
"error": "Invalid _shape: {}".format(shape),
|
|
|
|
"status": 400,
|
|
|
|
"title": None,
|
2018-05-13 12:44:22 +00:00
|
|
|
}
|
|
|
|
headers = {}
|
|
|
|
if self.ds.cors:
|
2018-05-13 12:55:15 +00:00
|
|
|
headers["Access-Control-Allow-Origin"] = "*"
|
2018-05-13 12:44:22 +00:00
|
|
|
r = response.HTTPResponse(
|
2018-05-13 12:55:15 +00:00
|
|
|
json.dumps(data, cls=CustomJSONEncoder),
|
2018-05-13 12:44:22 +00:00
|
|
|
status=status_code,
|
2018-05-13 12:55:15 +00:00
|
|
|
content_type="application/json",
|
2018-05-13 12:44:22 +00:00
|
|
|
headers=headers,
|
|
|
|
)
|
|
|
|
else:
|
|
|
|
extras = {}
|
|
|
|
if callable(extra_template_data):
|
|
|
|
extras = extra_template_data()
|
|
|
|
if asyncio.iscoroutine(extras):
|
|
|
|
extras = await extras
|
|
|
|
else:
|
|
|
|
extras = extra_template_data
|
|
|
|
context = {
|
|
|
|
**data,
|
|
|
|
**extras,
|
|
|
|
**{
|
2018-06-15 06:51:23 +00:00
|
|
|
"url_json": path_with_format(request, "json"),
|
|
|
|
"url_csv": path_with_format(request, "csv", {
|
|
|
|
"_size": "max"
|
|
|
|
}),
|
|
|
|
"url_csv_dl": path_with_format(request, "csv", {
|
|
|
|
"_dl": "1",
|
|
|
|
"_size": "max"
|
|
|
|
}),
|
2018-05-13 12:55:15 +00:00
|
|
|
"extra_css_urls": self.ds.extra_css_urls(),
|
|
|
|
"extra_js_urls": self.ds.extra_js_urls(),
|
|
|
|
"datasette_version": __version__,
|
2018-05-13 12:44:22 +00:00
|
|
|
}
|
|
|
|
}
|
2018-05-13 12:55:15 +00:00
|
|
|
if "metadata" not in context:
|
|
|
|
context["metadata"] = self.ds.metadata
|
|
|
|
r = self.render(templates, **context)
|
2018-05-13 12:44:22 +00:00
|
|
|
r.status = status_code
|
|
|
|
# Set far-future cache expiry
|
|
|
|
if self.ds.cache_headers:
|
2018-05-26 22:17:33 +00:00
|
|
|
ttl = request.args.get("_ttl", None)
|
|
|
|
if ttl is None or not ttl.isdigit():
|
|
|
|
ttl = self.ds.config["default_cache_ttl"]
|
|
|
|
else:
|
|
|
|
ttl = int(ttl)
|
|
|
|
if ttl == 0:
|
|
|
|
ttl_header = 'no-cache'
|
|
|
|
else:
|
|
|
|
ttl_header = 'max-age={}'.format(ttl)
|
|
|
|
r.headers["Cache-Control"] = ttl_header
|
|
|
|
r.headers["Referrer-Policy"] = "no-referrer"
|
2018-05-13 12:44:22 +00:00
|
|
|
return r
|
|
|
|
|
2018-05-13 12:55:15 +00:00
|
|
|
async def custom_sql(
|
|
|
|
self, request, name, hash, sql, editable=True, canned_query=None
|
|
|
|
):
|
2018-05-13 12:44:22 +00:00
|
|
|
params = request.raw_args
|
2018-05-13 12:55:15 +00:00
|
|
|
if "sql" in params:
|
|
|
|
params.pop("sql")
|
|
|
|
if "_shape" in params:
|
|
|
|
params.pop("_shape")
|
2018-05-13 12:44:22 +00:00
|
|
|
# Extract any :named parameters
|
|
|
|
named_parameters = self.re_named_parameter.findall(sql)
|
|
|
|
named_parameter_values = {
|
2018-05-13 12:55:15 +00:00
|
|
|
named_parameter: params.get(named_parameter) or ""
|
2018-05-13 12:44:22 +00:00
|
|
|
for named_parameter in named_parameters
|
|
|
|
}
|
|
|
|
|
|
|
|
# Set to blank string if missing from params
|
|
|
|
for named_parameter in named_parameters:
|
|
|
|
if named_parameter not in params:
|
2018-05-13 12:55:15 +00:00
|
|
|
params[named_parameter] = ""
|
2018-05-13 12:44:22 +00:00
|
|
|
|
|
|
|
extra_args = {}
|
2018-05-13 12:55:15 +00:00
|
|
|
if params.get("_timelimit"):
|
|
|
|
extra_args["custom_time_limit"] = int(params["_timelimit"])
|
2018-05-25 00:15:37 +00:00
|
|
|
results = await self.ds.execute(
|
2018-05-13 12:44:22 +00:00
|
|
|
name, sql, params, truncate=True, **extra_args
|
|
|
|
)
|
2018-05-25 00:15:37 +00:00
|
|
|
columns = [r[0] for r in results.description]
|
2018-05-13 12:44:22 +00:00
|
|
|
|
2018-05-13 12:55:15 +00:00
|
|
|
templates = ["query-{}.html".format(to_css_class(name)), "query.html"]
|
2018-05-13 12:44:22 +00:00
|
|
|
if canned_query:
|
2018-05-13 12:55:15 +00:00
|
|
|
templates.insert(
|
|
|
|
0,
|
|
|
|
"query-{}-{}.html".format(
|
|
|
|
to_css_class(name), to_css_class(canned_query)
|
|
|
|
),
|
|
|
|
)
|
2018-05-13 12:44:22 +00:00
|
|
|
|
|
|
|
return {
|
2018-05-13 12:55:15 +00:00
|
|
|
"database": name,
|
2018-05-25 00:15:37 +00:00
|
|
|
"rows": results.rows,
|
|
|
|
"truncated": results.truncated,
|
2018-05-13 12:55:15 +00:00
|
|
|
"columns": columns,
|
|
|
|
"query": {"sql": sql, "params": params},
|
2018-05-13 12:44:22 +00:00
|
|
|
}, {
|
2018-05-13 12:55:15 +00:00
|
|
|
"database_hash": hash,
|
|
|
|
"custom_sql": True,
|
|
|
|
"named_parameter_values": named_parameter_values,
|
|
|
|
"editable": editable,
|
|
|
|
"canned_query": canned_query,
|
2018-05-25 22:08:57 +00:00
|
|
|
"config": self.ds.config,
|
2018-05-13 12:44:22 +00:00
|
|
|
}, templates
|
2018-05-28 18:08:39 +00:00
|
|
|
|
|
|
|
|
|
|
|
def convert_specific_columns_to_json(rows, columns, json_cols):
    """Return *rows* with the named columns parsed from JSON strings.

    rows: sequence of row sequences, each aligned with *columns*
    columns: list of column names
    json_cols: iterable of column names whose values should be json.loads()'d

    Values that fail to parse (not valid JSON, or not a string at all) are
    left unchanged - this is best-effort by design.  If none of json_cols
    appear in columns, the original rows object is returned untouched.
    """
    json_cols = set(json_cols)
    if not json_cols.intersection(columns):
        return rows
    new_rows = []
    for row in rows:
        new_row = []
        for value, column in zip(row, columns):
            if column in json_cols:
                try:
                    value = json.loads(value)
                except (TypeError, ValueError):
                    # Leave unparseable values as-is.  (A leftover debugging
                    # print(e) used to live here; removed.)
                    pass
            new_row.append(value)
        new_rows.append(new_row)
    return new_rows
|