2022-04-26 22:48:56 +00:00
|
|
|
import asyncio
|
Extract facet code out into a new plugin hook, closes #427 (#445)
Datasette previously only supported one type of faceting: exact column value counting.
With this change, faceting logic is extracted out into one or more separate classes which can implement other patterns of faceting - this is discussed in #427, but potential upcoming facet types include facet-by-date, facet-by-JSON-array, facet-by-many-2-many and more.
A new plugin hook, register_facet_classes, can be used by plugins to add in additional facet classes.
Each class must implement two methods: suggest(), which scans columns in the table to decide if they might be worth suggesting for faceting, and facet_results(), which executes the facet operation and returns results ready to be displayed in the UI.
2019-05-03 00:11:26 +00:00
|
|
|
import itertools
|
2019-05-23 05:44:34 +00:00
|
|
|
import json
|
2018-05-13 12:58:28 +00:00
|
|
|
|
2021-05-24 01:41:50 +00:00
|
|
|
import markupsafe
|
2018-05-13 12:58:28 +00:00
|
|
|
|
2018-08-05 00:14:56 +00:00
|
|
|
from datasette.plugins import pm
|
2020-05-08 16:05:46 +00:00
|
|
|
from datasette.database import QueryInterrupted
|
2022-04-26 22:48:56 +00:00
|
|
|
from datasette import tracer
|
2018-05-13 12:44:22 +00:00
|
|
|
from datasette.utils import (
|
2020-10-30 05:16:41 +00:00
|
|
|
await_me_maybe,
|
?_labels= and ?_label=COL to expand foreign keys in JSON/CSV
These new querystring arguments can be used to request expanded foreign keys
in both JSON and CSV formats.
?_labels=on turns on expansions for ALL foreign key columns
?_label=COLUMN1&_label=COLUMN2 can be used to pick specific columns to expand
e.g. `Street_Tree_List.json?_label=qSpecies&_label=qLegalStatus`
{
"rowid": 233,
"TreeID": 121240,
"qLegalStatus": {
"value" 2,
"label": "Private"
}
"qSpecies": {
"value": 16,
"label": "Sycamore"
}
"qAddress": "91 Commonwealth Ave",
...
}
The labels option also works for the HTML and CSV views.
HTML defaults to `?_labels=on`, so if you pass `?_labels=off` you can disable
foreign key expansion entirely - or you can use `?_label=COLUMN` to request
just specific columns.
If you expand labels on CSV you get additional columns in the output:
`/Street_Tree_List.csv?_label=qLegalStatus`
rowid,TreeID,qLegalStatus,qLegalStatus_label...
1,141565,1,Permitted Site...
2,232565,2,Undocumented...
I also refactored the existing foreign key expansion code.
Closes #233. Refs #266.
2018-06-16 22:18:57 +00:00
|
|
|
CustomRow,
|
2018-06-18 06:03:22 +00:00
|
|
|
append_querystring,
|
2018-05-13 12:44:22 +00:00
|
|
|
compound_keys_after_sql,
|
2022-04-12 18:44:12 +00:00
|
|
|
format_bytes,
|
2022-03-15 18:01:57 +00:00
|
|
|
tilde_decode,
|
|
|
|
tilde_encode,
|
2018-05-13 12:44:22 +00:00
|
|
|
escape_sqlite,
|
|
|
|
filters_should_redirect,
|
2022-04-12 18:44:12 +00:00
|
|
|
format_bytes,
|
2018-05-13 12:44:22 +00:00
|
|
|
is_url,
|
|
|
|
path_from_row_pks,
|
|
|
|
path_with_added_args,
|
2018-05-14 20:42:10 +00:00
|
|
|
path_with_removed_args,
|
2018-05-15 09:34:45 +00:00
|
|
|
path_with_replaced_args,
|
2018-05-13 12:44:22 +00:00
|
|
|
to_css_class,
|
2022-09-06 23:50:43 +00:00
|
|
|
truncate_url,
|
2018-05-18 06:07:45 +00:00
|
|
|
urlsafe_components,
|
?_labels= and ?_label=COL to expand foreign keys in JSON/CSV
These new querystring arguments can be used to request expanded foreign keys
in both JSON and CSV formats.
?_labels=on turns on expansions for ALL foreign key columns
?_label=COLUMN1&_label=COLUMN2 can be used to pick specific columns to expand
e.g. `Street_Tree_List.json?_label=qSpecies&_label=qLegalStatus`
{
"rowid": 233,
"TreeID": 121240,
"qLegalStatus": {
"value" 2,
"label": "Private"
}
"qSpecies": {
"value": 16,
"label": "Sycamore"
}
"qAddress": "91 Commonwealth Ave",
...
}
The labels option also works for the HTML and CSV views.
HTML defaults to `?_labels=on`, so if you pass `?_labels=off` you can disable
foreign key expansion entirely - or you can use `?_label=COLUMN` to request
just specific columns.
If you expand labels on CSV you get additional columns in the output:
`/Street_Tree_List.csv?_label=qLegalStatus`
rowid,TreeID,qLegalStatus,qLegalStatus_label...
1,141565,1,Permitted Site...
2,232565,2,Undocumented...
I also refactored the existing foreign key expansion code.
Closes #233. Refs #266.
2018-06-16 22:18:57 +00:00
|
|
|
value_as_boolean,
|
2018-05-13 12:44:22 +00:00
|
|
|
)
|
2022-10-27 03:57:02 +00:00
|
|
|
from datasette.utils.asgi import BadRequest, Forbidden, NotFound, Response
|
2019-04-15 21:51:20 +00:00
|
|
|
from datasette.filters import Filters
|
2022-10-30 06:03:45 +00:00
|
|
|
import sqlite_utils
|
2022-11-03 23:36:43 +00:00
|
|
|
from .base import BaseView, DataView, DatasetteError, ureg, _error
|
2020-04-03 01:12:13 +00:00
|
|
|
from .database import QueryView
|
2018-05-13 12:44:22 +00:00
|
|
|
|
2019-05-04 02:15:14 +00:00
|
|
|
LINK_WITH_LABEL = (
|
2020-03-25 00:18:43 +00:00
|
|
|
'<a href="{base_url}{database}/{table}/{link_id}">{label}</a> <em>{id}</em>'
|
2019-05-04 02:15:14 +00:00
|
|
|
)
|
2020-03-25 00:18:43 +00:00
|
|
|
LINK_WITH_VALUE = '<a href="{base_url}{database}/{table}/{link_id}">{id}</a>'
|
?_labels= and ?_label=COL to expand foreign keys in JSON/CSV
These new querystring arguments can be used to request expanded foreign keys
in both JSON and CSV formats.
?_labels=on turns on expansions for ALL foreign key columns
?_label=COLUMN1&_label=COLUMN2 can be used to pick specific columns to expand
e.g. `Street_Tree_List.json?_label=qSpecies&_label=qLegalStatus`
{
"rowid": 233,
"TreeID": 121240,
"qLegalStatus": {
"value" 2,
"label": "Private"
}
"qSpecies": {
"value": 16,
"label": "Sycamore"
}
"qAddress": "91 Commonwealth Ave",
...
}
The labels option also works for the HTML and CSV views.
HTML defaults to `?_labels=on`, so if you pass `?_labels=off` you can disable
foreign key expansion entirely - or you can use `?_label=COLUMN` to request
just specific columns.
If you expand labels on CSV you get additional columns in the output:
`/Street_Tree_List.csv?_label=qLegalStatus`
rowid,TreeID,qLegalStatus,qLegalStatus_label...
1,141565,1,Permitted Site...
2,232565,2,Undocumented...
I also refactored the existing foreign key expansion code.
Closes #233. Refs #266.
2018-06-16 22:18:57 +00:00
|
|
|
|
2018-05-13 12:44:22 +00:00
|
|
|
|
2019-06-25 12:02:42 +00:00
|
|
|
class Row:
    """A display-ready table row: a list of cell dictionaries.

    Each cell dict carries at least "column" (the column name),
    "raw" (the underlying value) and "value" (the display value);
    cells may also set an "is_special_link_column" flag, which
    excludes them from the JSON representation.
    """

    def __init__(self, cells):
        self.cells = cells

    def __iter__(self):
        return iter(self.cells)

    def __getitem__(self, key):
        # Look up a cell by column name, returning the raw value
        for cell in self.cells:
            if cell["column"] == key:
                return cell["raw"]
        # Include the missing key in the exception so callers can see
        # which column was requested (original raised a bare KeyError)
        raise KeyError(key)

    def display(self, key):
        """Return the display value for column *key*, or None if absent."""
        for cell in self.cells:
            if cell["column"] == key:
                return cell["value"]
        return None

    def __str__(self):
        # JSON dump of raw values, skipping special link columns;
        # default=repr handles non-JSON-serializable values
        d = {
            key: self[key]
            for key in [
                c["column"] for c in self.cells if not c.get("is_special_link_column")
            ]
        }
        return json.dumps(d, default=repr, indent=2)
|
|
|
|
|
|
|
|
|
2022-04-25 18:33:35 +00:00
|
|
|
class TableView(DataView):
|
|
|
|
name = "table"
|
|
|
|
|
2022-04-26 20:56:27 +00:00
|
|
|
async def sortable_columns_for_table(self, database_name, table_name, use_rowid):
    """Return the set of column names this table may be sorted by.

    The "sortable_columns" table metadata key, when present, restricts
    the set; otherwise every column of the table is sortable. For
    rowid tables the implicit "rowid" column is added as well.
    """
    database = self.ds.databases[database_name]
    metadata = self.ds.table_metadata(database_name, table_name)
    if "sortable_columns" in metadata:
        columns = set(metadata["sortable_columns"])
    else:
        columns = set(await database.table_columns(table_name))
    if use_rowid:
        columns.add("rowid")
    return columns
|
|
|
|
|
2022-04-26 20:56:27 +00:00
|
|
|
async def expandable_columns(self, database_name, table_name):
    """Return (fk, label_column) pairs for each foreign key of the table.

    label_column is the label column of the referenced table, or None
    when that table has no label column.
    """
    database = self.ds.databases[database_name]
    pairs = []
    for foreign_key in await database.foreign_keys_for_table(table_name):
        label = await database.label_column_for_table(foreign_key["other_table"])
        pairs.append((foreign_key, label))
    return pairs
|
|
|
|
|
2022-03-19 00:12:03 +00:00
|
|
|
async def post(self, request):
    """Handle POST to a table URL.

    If the path does not resolve to a table but a canned query of the
    same name exists, the POST is dispatched to QueryView instead;
    otherwise the request is handled by table_post().
    """
    from datasette.app import TableNotFound

    try:
        resolved = await self.ds.resolve_table(request)
    except TableNotFound as e:
        # Was this actually a canned query?
        canned_query = await self.ds.get_canned_query(
            e.database_name, e.table, request.actor
        )
        if canned_query:
            # Handle POST to a canned query
            return await QueryView(self.ds).data(
                request,
                canned_query["sql"],
                metadata=canned_query,
                editable=False,
                canned_query=e.table,
                named_parameters=canned_query.get("params"),
                write=bool(canned_query.get("write")),
            )
        # Not a canned query either: re-raise the original TableNotFound
        # rather than falling through to table_post() with `resolved`
        # unbound (which would raise UnboundLocalError instead of a 404)
        raise

    # Handle POST to a table
    return await self.table_post(
        request, resolved.db, resolved.db.name, resolved.table
    )
|
|
|
|
|
|
|
|
async def table_post(self, request, db, database_name, table_name):
    """Insert a row into a table from a JSON POST body.

    Expects a body of the form {"insert": {column: value, ...}} and
    returns the newly inserted row as JSON with a 201 status.

    Raises Forbidden if the actor lacks insert-row permission, and
    BadRequest for a non-JSON body, a missing/invalid "insert" key,
    unknown columns, or attempts to set a primary key column.
    """
    # Must have insert-row permission
    if not await self.ds.permission_allowed(
        request.actor, "insert-row", resource=(database_name, table_name)
    ):
        raise Forbidden("Permission denied")
    # Compare only the media type so that values with parameters,
    # e.g. "application/json; charset=utf-8", are accepted too; the
    # "" default also guards against a missing header (None)
    content_type = request.headers.get("content-type", "")
    if content_type.split(";")[0].strip() != "application/json":
        # TODO: handle form-encoded data
        raise BadRequest("Must send JSON data")
    data = json.loads(await request.post_body())
    if "insert" not in data:
        raise BadRequest('Must send a "insert" key containing a dictionary')
    row = data["insert"]
    if not isinstance(row, dict):
        raise BadRequest("insert must be a dictionary")
    # Verify all columns exist and none of them are primary keys
    columns = await db.table_columns(table_name)
    pks = await db.primary_keys(table_name)
    for key in row:
        if key not in columns:
            raise BadRequest("Column not found: {}".format(key))
        if key in pks:
            raise BadRequest(
                "Cannot insert into primary key column: {}".format(key)
            )
    # Perform the insert - column names are escaped and values are
    # passed as bound parameters, never interpolated into the SQL.
    # NOTE(review): escape_sqlite() may itself wrap unusual names in
    # [...]; wrapping its output in [...] again here would produce
    # invalid SQL for such names - confirm table/column names are
    # pre-validated upstream
    sql = "INSERT INTO [{table}] ({columns}) VALUES ({values})".format(
        table=escape_sqlite(table_name),
        columns=", ".join(escape_sqlite(c) for c in row),
        values=", ".join("?" for c in row),
    )
    cursor = await db.execute_write(sql, list(row.values()))
    # Return the new row, looked up via the cursor's lastrowid
    rowid = cursor.lastrowid
    new_row = (
        await db.execute(
            "SELECT * FROM [{table}] WHERE rowid = ?".format(
                table=escape_sqlite(table_name)
            ),
            [rowid],
        )
    ).first()
    return Response.json(
        {
            "inserted_row": dict(new_row),
        },
        status=201,
    )
|
|
|
|
|
2021-12-16 21:43:44 +00:00
|
|
|
async def columns_to_select(self, table_columns, pks, request):
    """Return the list of columns to include in the SQL SELECT.

    Honors ?_col= (select only these columns; primary keys are always
    included first) and ?_nocol= (select all columns except these;
    primary keys cannot be excluded).

    Raises DatasetteError with status 400 if any requested column does
    not exist, or if ?_nocol= names a primary key.
    """
    columns = list(table_columns)
    if "_col" in request.args:
        # Start from the primary keys, then append the requested columns
        columns = list(pks)
        _cols = request.args.getlist("_col")
        bad_columns = [column for column in _cols if column not in table_columns]
        if bad_columns:
            raise DatasetteError(
                "_col={} - invalid columns".format(", ".join(bad_columns)),
                status=400,
            )
        # De-duplicate maintaining order:
        columns.extend(dict.fromkeys(_cols))
    if "_nocol" in request.args:
        # Return all columns EXCEPT these; hoist the getlist() call out
        # of the loops so it is evaluated once, not per column
        nocols = request.args.getlist("_nocol")
        bad_columns = [
            column
            for column in nocols
            if (column not in table_columns) or (column in pks)
        ]
        if bad_columns:
            raise DatasetteError(
                "_nocol={} - invalid columns".format(", ".join(bad_columns)),
                status=400,
            )
        columns = [column for column in columns if column not in nocols]
    return columns
|
|
|
|
|
2019-05-04 02:15:14 +00:00
|
|
|
async def data(self, request, default_labels=False, _next=None, _size=None):
    """Entry point for table data.

    Thin wrapper: enables child-task tracing, then delegates all real
    work to _data_traced().
    """
    with tracer.trace_child_tasks():
        return await self._data_traced(
            request, default_labels=default_labels, _next=_next, _size=_size
        )
|
|
|
|
|
|
|
|
async def _data_traced(
|
|
|
|
self,
|
|
|
|
request,
|
|
|
|
default_labels=False,
|
|
|
|
_next=None,
|
|
|
|
_size=None,
|
2019-05-04 02:15:14 +00:00
|
|
|
):
|
2022-11-18 22:46:25 +00:00
|
|
|
from datasette.app import TableNotFound
|
|
|
|
|
2022-03-19 00:12:03 +00:00
|
|
|
try:
|
2022-11-18 22:46:25 +00:00
|
|
|
resolved = await self.ds.resolve_table(request)
|
|
|
|
except TableNotFound as e:
|
|
|
|
# Was this actually a canned query?
|
|
|
|
canned_query = await self.ds.get_canned_query(
|
|
|
|
e.database_name, e.table, request.actor
|
|
|
|
)
|
|
|
|
# If this is a canned query, not a table, then dispatch to QueryView instead
|
|
|
|
if canned_query:
|
|
|
|
return await QueryView(self.ds).data(
|
|
|
|
request,
|
|
|
|
canned_query["sql"],
|
|
|
|
metadata=canned_query,
|
|
|
|
editable=False,
|
|
|
|
canned_query=e.table,
|
|
|
|
named_parameters=canned_query.get("params"),
|
|
|
|
write=bool(canned_query.get("write")),
|
|
|
|
)
|
|
|
|
else:
|
|
|
|
raise
|
|
|
|
|
|
|
|
table_name = resolved.table
|
|
|
|
db = resolved.db
|
2022-04-26 20:56:27 +00:00
|
|
|
database_name = db.name
|
2022-03-19 00:12:03 +00:00
|
|
|
|
2022-04-26 22:48:56 +00:00
|
|
|
# For performance profiling purposes, ?_noparallel=1 turns off asyncio.gather
|
|
|
|
async def _gather_parallel(*args):
|
|
|
|
return await asyncio.gather(*args)
|
|
|
|
|
|
|
|
async def _gather_sequential(*args):
|
|
|
|
results = []
|
|
|
|
for fn in args:
|
|
|
|
results.append(await fn)
|
|
|
|
return results
|
|
|
|
|
|
|
|
gather = (
|
|
|
|
_gather_sequential if request.args.get("_noparallel") else _gather_parallel
|
|
|
|
)
|
|
|
|
|
|
|
|
is_view, table_exists = map(
|
|
|
|
bool,
|
|
|
|
await gather(
|
|
|
|
db.get_view_definition(table_name), db.table_exists(table_name)
|
|
|
|
),
|
|
|
|
)
|
2021-12-16 21:43:44 +00:00
|
|
|
|
|
|
|
# If table or view not found, return 404
|
2019-04-07 03:47:10 +00:00
|
|
|
if not is_view and not table_exists:
|
2022-04-26 20:56:27 +00:00
|
|
|
raise NotFound(f"Table not found: {table_name}")
|
2019-05-27 04:56:43 +00:00
|
|
|
|
2021-12-16 21:43:44 +00:00
|
|
|
# Ensure user has permission to view this table
|
2022-10-24 02:11:33 +00:00
|
|
|
visible, private = await self.ds.check_visibility(
|
2022-03-21 17:13:16 +00:00
|
|
|
request.actor,
|
2022-10-24 02:11:33 +00:00
|
|
|
permissions=[
|
2022-04-26 20:56:27 +00:00
|
|
|
("view-table", (database_name, table_name)),
|
|
|
|
("view-database", database_name),
|
2020-06-30 23:40:50 +00:00
|
|
|
"view-instance",
|
|
|
|
],
|
|
|
|
)
|
2022-10-24 02:11:33 +00:00
|
|
|
if not visible:
|
|
|
|
raise Forbidden("You do not have permission to view this table")
|
2020-06-08 18:07:11 +00:00
|
|
|
|
2021-12-22 20:22:44 +00:00
|
|
|
# Handle ?_filter_column and redirect, if present
|
|
|
|
redirect_params = filters_should_redirect(request.args)
|
|
|
|
if redirect_params:
|
|
|
|
return self.redirect(
|
|
|
|
request,
|
2022-11-11 06:49:54 +00:00
|
|
|
self.ds.urls.path(path_with_added_args(request, redirect_params)),
|
2021-12-22 20:22:44 +00:00
|
|
|
forward_querystring=False,
|
|
|
|
)
|
|
|
|
|
|
|
|
# If ?_sort_by_desc=on (from checkbox) redirect to _sort_desc=(_sort)
|
|
|
|
if "_sort_by_desc" in request.args:
|
|
|
|
return self.redirect(
|
|
|
|
request,
|
2022-11-11 06:49:54 +00:00
|
|
|
self.ds.urls.path(
|
|
|
|
path_with_added_args(
|
|
|
|
request,
|
|
|
|
{
|
|
|
|
"_sort_desc": request.args.get("_sort"),
|
|
|
|
"_sort_by_desc": None,
|
|
|
|
"_sort": None,
|
|
|
|
},
|
|
|
|
)
|
2021-12-22 20:22:44 +00:00
|
|
|
),
|
|
|
|
forward_querystring=False,
|
|
|
|
)
|
|
|
|
|
2021-12-16 21:43:44 +00:00
|
|
|
# Introspect columns and primary keys for table
|
2022-04-26 20:56:27 +00:00
|
|
|
pks = await db.primary_keys(table_name)
|
|
|
|
table_columns = await db.table_columns(table_name)
|
2021-05-31 02:31:14 +00:00
|
|
|
|
2021-12-16 21:43:44 +00:00
|
|
|
# Take ?_col= and ?_nocol= into account
|
|
|
|
specified_columns = await self.columns_to_select(table_columns, pks, request)
|
2021-05-31 02:31:14 +00:00
|
|
|
select_specified_columns = ", ".join(
|
|
|
|
escape_sqlite(t) for t in specified_columns
|
|
|
|
)
|
|
|
|
select_all_columns = ", ".join(escape_sqlite(t) for t in table_columns)
|
2019-11-04 23:03:48 +00:00
|
|
|
|
2021-12-16 21:43:44 +00:00
|
|
|
# rowid tables (no specified primary key) need a different SELECT
|
2018-05-13 12:44:22 +00:00
|
|
|
use_rowid = not pks and not is_view
|
|
|
|
if use_rowid:
|
2021-05-31 02:31:14 +00:00
|
|
|
select_specified_columns = f"rowid, {select_specified_columns}"
|
|
|
|
select_all_columns = f"rowid, {select_all_columns}"
|
2018-05-13 12:55:15 +00:00
|
|
|
order_by = "rowid"
|
|
|
|
order_by_pks = "rowid"
|
2018-05-13 12:44:22 +00:00
|
|
|
else:
|
2018-05-13 12:55:15 +00:00
|
|
|
order_by_pks = ", ".join([escape_sqlite(pk) for pk in pks])
|
2018-05-13 12:44:22 +00:00
|
|
|
order_by = order_by_pks
|
|
|
|
|
|
|
|
if is_view:
|
2018-05-13 12:55:15 +00:00
|
|
|
order_by = ""
|
2018-05-13 12:44:22 +00:00
|
|
|
|
2021-06-02 02:53:00 +00:00
|
|
|
nocount = request.args.get("_nocount")
|
|
|
|
nofacet = request.args.get("_nofacet")
|
2021-12-16 19:24:54 +00:00
|
|
|
nosuggest = request.args.get("_nosuggest")
|
2021-06-02 02:53:00 +00:00
|
|
|
|
|
|
|
if request.args.get("_shape") in ("array", "object"):
|
|
|
|
nocount = True
|
|
|
|
nofacet = True
|
|
|
|
|
2022-04-26 20:56:27 +00:00
|
|
|
table_metadata = self.ds.table_metadata(database_name, table_name)
|
2018-05-13 12:55:15 +00:00
|
|
|
units = table_metadata.get("units", {})
|
2021-12-16 22:00:29 +00:00
|
|
|
|
2021-12-22 20:22:44 +00:00
|
|
|
# Arguments that start with _ and don't contain a __ are
|
|
|
|
# special - things like ?_search= - and should not be
|
|
|
|
# treated as filters.
|
|
|
|
filter_args = []
|
|
|
|
for key in request.args:
|
|
|
|
if not (key.startswith("_") and "__" not in key):
|
|
|
|
for v in request.args.getlist(key):
|
|
|
|
filter_args.append((key, v))
|
|
|
|
|
2021-12-16 22:00:29 +00:00
|
|
|
# Build where clauses from query string arguments
|
2021-12-22 20:22:44 +00:00
|
|
|
filters = Filters(sorted(filter_args), units, ureg)
|
2022-04-26 20:56:27 +00:00
|
|
|
where_clauses, params = filters.build_where_clauses(table_name)
|
2018-05-13 12:44:22 +00:00
|
|
|
|
2021-12-17 23:28:26 +00:00
|
|
|
# Execute filters_from_request plugin hooks - including the default
|
|
|
|
# ones that live in datasette/filters.py
|
2021-12-17 19:02:14 +00:00
|
|
|
extra_context_from_filters = {}
|
2019-05-23 05:44:34 +00:00
|
|
|
extra_human_descriptions = []
|
2018-05-13 12:44:22 +00:00
|
|
|
|
2021-12-17 19:02:14 +00:00
|
|
|
for hook in pm.hook.filters_from_request(
|
|
|
|
request=request,
|
2022-04-26 20:56:27 +00:00
|
|
|
table=table_name,
|
|
|
|
database=database_name,
|
2021-12-17 19:02:14 +00:00
|
|
|
datasette=self.ds,
|
|
|
|
):
|
|
|
|
filter_arguments = await await_me_maybe(hook)
|
|
|
|
if filter_arguments:
|
|
|
|
where_clauses.extend(filter_arguments.where_clauses)
|
|
|
|
params.update(filter_arguments.params)
|
|
|
|
extra_human_descriptions.extend(filter_arguments.human_descriptions)
|
|
|
|
extra_context_from_filters.update(filter_arguments.extra_context)
|
|
|
|
|
|
|
|
# Deal with custom sort orders
|
2019-05-04 02:15:14 +00:00
|
|
|
sortable_columns = await self.sortable_columns_for_table(
|
2022-04-26 20:56:27 +00:00
|
|
|
database_name, table_name, use_rowid
|
2019-05-04 02:15:14 +00:00
|
|
|
)
|
2021-12-22 20:22:44 +00:00
|
|
|
sort = request.args.get("_sort")
|
|
|
|
sort_desc = request.args.get("_sort_desc")
|
2020-03-22 02:40:29 +00:00
|
|
|
|
|
|
|
if not sort and not sort_desc:
|
|
|
|
sort = table_metadata.get("sort")
|
|
|
|
sort_desc = table_metadata.get("sort_desc")
|
2020-03-21 23:57:37 +00:00
|
|
|
|
|
|
|
if sort and sort_desc:
|
|
|
|
raise DatasetteError("Cannot use _sort and _sort_desc at the same time")
|
|
|
|
|
2018-05-13 12:44:22 +00:00
|
|
|
if sort:
|
|
|
|
if sort not in sortable_columns:
|
2020-11-15 23:24:22 +00:00
|
|
|
raise DatasetteError(f"Cannot sort table by {sort}")
|
2018-05-13 12:55:15 +00:00
|
|
|
|
2018-05-13 12:44:22 +00:00
|
|
|
order_by = escape_sqlite(sort)
|
2020-03-21 23:57:37 +00:00
|
|
|
|
2018-05-13 12:44:22 +00:00
|
|
|
if sort_desc:
|
|
|
|
if sort_desc not in sortable_columns:
|
2020-11-15 23:24:22 +00:00
|
|
|
raise DatasetteError(f"Cannot sort table by {sort_desc}")
|
2018-05-13 12:55:15 +00:00
|
|
|
|
2020-11-15 23:24:22 +00:00
|
|
|
order_by = f"{escape_sqlite(sort_desc)} desc"
|
2018-05-13 12:44:22 +00:00
|
|
|
|
2018-05-13 12:55:15 +00:00
|
|
|
from_sql = "from {table_name} {where}".format(
|
2022-04-26 20:56:27 +00:00
|
|
|
table_name=escape_sqlite(table_name),
|
2019-05-04 02:15:14 +00:00
|
|
|
where=("where {} ".format(" and ".join(where_clauses)))
|
|
|
|
if where_clauses
|
|
|
|
else "",
|
2018-05-13 12:44:22 +00:00
|
|
|
)
|
Extract facet code out into a new plugin hook, closes #427 (#445)
Datasette previously only supported one type of faceting: exact column value counting.
With this change, faceting logic is extracted out into one or more separate classes which can implement other patterns of faceting - this is discussed in #427, but potential upcoming facet types include facet-by-date, facet-by-JSON-array, facet-by-many-2-many and more.
A new plugin hook, register_facet_classes, can be used by plugins to add in additional facet classes.
Each class must implement two methods: suggest(), which scans columns in the table to decide if they might be worth suggesting for faceting, and facet_results(), which executes the facet operation and returns results ready to be displayed in the UI.
2019-05-03 00:11:26 +00:00
|
|
|
# Copy of params so we can mutate them later:
|
2018-05-18 06:07:45 +00:00
|
|
|
from_sql_params = dict(**params)
|
|
|
|
|
2020-11-15 23:24:22 +00:00
|
|
|
count_sql = f"select count(*) {from_sql}"
|
2018-05-13 12:44:22 +00:00
|
|
|
|
2021-12-16 22:00:29 +00:00
|
|
|
# Handle pagination driven by ?_next=
|
2021-12-22 20:22:44 +00:00
|
|
|
_next = _next or request.args.get("_next")
|
2018-05-13 12:55:15 +00:00
|
|
|
offset = ""
|
2018-05-13 12:44:22 +00:00
|
|
|
if _next:
|
2021-10-10 01:14:56 +00:00
|
|
|
sort_value = None
|
2018-05-13 12:44:22 +00:00
|
|
|
if is_view:
|
|
|
|
# _next is an offset
|
2020-11-15 23:24:22 +00:00
|
|
|
offset = f" offset {int(_next)}"
|
2018-05-13 12:44:22 +00:00
|
|
|
else:
|
|
|
|
components = urlsafe_components(_next)
|
2021-10-10 01:14:56 +00:00
|
|
|
# If a sort order is applied and there are multiple components,
|
|
|
|
# the first of these is the sort value
|
|
|
|
if (sort or sort_desc) and (len(components) > 1):
|
2018-05-13 12:44:22 +00:00
|
|
|
sort_value = components[0]
|
|
|
|
# Special case for if non-urlencoded first token was $null
|
2018-05-13 12:55:15 +00:00
|
|
|
if _next.split(",")[0] == "$null":
|
2018-05-13 12:44:22 +00:00
|
|
|
sort_value = None
|
|
|
|
components = components[1:]
|
|
|
|
|
|
|
|
# Figure out the SQL for next-based-on-primary-key first
|
|
|
|
next_by_pk_clauses = []
|
|
|
|
if use_rowid:
|
2020-11-15 23:24:22 +00:00
|
|
|
next_by_pk_clauses.append(f"rowid > :p{len(params)}")
|
|
|
|
params[f"p{len(params)}"] = components[0]
|
2018-05-13 12:44:22 +00:00
|
|
|
else:
|
|
|
|
# Apply the tie-breaker based on primary keys
|
|
|
|
if len(components) == len(pks):
|
|
|
|
param_len = len(params)
|
2018-05-13 12:55:15 +00:00
|
|
|
next_by_pk_clauses.append(
|
|
|
|
compound_keys_after_sql(pks, param_len)
|
|
|
|
)
|
2018-05-13 12:44:22 +00:00
|
|
|
for i, pk_value in enumerate(components):
|
2020-11-15 23:24:22 +00:00
|
|
|
params[f"p{param_len + i}"] = pk_value
|
2018-05-13 12:44:22 +00:00
|
|
|
|
|
|
|
# Now add the sort SQL, which may incorporate next_by_pk_clauses
|
|
|
|
if sort or sort_desc:
|
|
|
|
if sort_value is None:
|
|
|
|
if sort_desc:
|
|
|
|
# Just items where column is null ordered by pk
|
|
|
|
where_clauses.append(
|
2018-05-13 12:55:15 +00:00
|
|
|
"({column} is null and {next_clauses})".format(
|
2018-05-13 12:44:22 +00:00
|
|
|
column=escape_sqlite(sort_desc),
|
2018-05-13 12:55:15 +00:00
|
|
|
next_clauses=" and ".join(next_by_pk_clauses),
|
2018-05-13 12:44:22 +00:00
|
|
|
)
|
|
|
|
)
|
|
|
|
else:
|
|
|
|
where_clauses.append(
|
2018-05-13 12:55:15 +00:00
|
|
|
"({column} is not null or ({column} is null and {next_clauses}))".format(
|
2018-05-13 12:44:22 +00:00
|
|
|
column=escape_sqlite(sort),
|
2018-05-13 12:55:15 +00:00
|
|
|
next_clauses=" and ".join(next_by_pk_clauses),
|
2018-05-13 12:44:22 +00:00
|
|
|
)
|
|
|
|
)
|
|
|
|
else:
|
|
|
|
where_clauses.append(
|
2018-05-13 12:55:15 +00:00
|
|
|
"({column} {op} :p{p}{extra_desc_only} or ({column} = :p{p} and {next_clauses}))".format(
|
2018-05-13 12:44:22 +00:00
|
|
|
column=escape_sqlite(sort or sort_desc),
|
2018-05-13 12:55:15 +00:00
|
|
|
op=">" if sort else "<",
|
2018-05-13 12:44:22 +00:00
|
|
|
p=len(params),
|
2019-05-04 02:15:14 +00:00
|
|
|
extra_desc_only=""
|
|
|
|
if sort
|
|
|
|
else " or {column2} is null".format(
|
2018-05-13 12:55:15 +00:00
|
|
|
column2=escape_sqlite(sort or sort_desc)
|
2018-05-13 12:44:22 +00:00
|
|
|
),
|
2018-05-13 12:55:15 +00:00
|
|
|
next_clauses=" and ".join(next_by_pk_clauses),
|
2018-05-13 12:44:22 +00:00
|
|
|
)
|
|
|
|
)
|
2020-11-15 23:24:22 +00:00
|
|
|
params[f"p{len(params)}"] = sort_value
|
|
|
|
order_by = f"{order_by}, {order_by_pks}"
|
2018-05-13 12:44:22 +00:00
|
|
|
else:
|
|
|
|
where_clauses.extend(next_by_pk_clauses)
|
|
|
|
|
2018-05-13 12:55:15 +00:00
|
|
|
where_clause = ""
|
2018-05-13 12:44:22 +00:00
|
|
|
if where_clauses:
|
2020-11-15 23:24:22 +00:00
|
|
|
where_clause = f"where {' and '.join(where_clauses)} "
|
2018-05-13 12:44:22 +00:00
|
|
|
|
|
|
|
if order_by:
|
2021-05-31 02:31:14 +00:00
|
|
|
order_by = f"order by {order_by}"
|
2018-05-13 12:44:22 +00:00
|
|
|
|
|
|
|
extra_args = {}
|
2018-05-20 17:01:49 +00:00
|
|
|
# Handle ?_size=500
|
2020-05-28 05:00:04 +00:00
|
|
|
page_size = _size or request.args.get("_size") or table_metadata.get("size")
|
2018-05-13 12:44:22 +00:00
|
|
|
if page_size:
|
2018-05-13 12:55:15 +00:00
|
|
|
if page_size == "max":
|
2018-06-30 14:51:57 +00:00
|
|
|
page_size = self.ds.max_returned_rows
|
2018-05-13 12:44:22 +00:00
|
|
|
try:
|
|
|
|
page_size = int(page_size)
|
|
|
|
if page_size < 0:
|
|
|
|
raise ValueError
|
2018-05-13 12:55:15 +00:00
|
|
|
|
2018-05-13 12:44:22 +00:00
|
|
|
except ValueError:
|
2020-10-29 22:01:38 +00:00
|
|
|
raise BadRequest("_size must be a positive integer")
|
2018-05-13 12:55:15 +00:00
|
|
|
|
2018-06-30 14:51:57 +00:00
|
|
|
if page_size > self.ds.max_returned_rows:
|
2020-11-15 23:24:22 +00:00
|
|
|
raise BadRequest(f"_size must be <= {self.ds.max_returned_rows}")
|
2018-05-13 12:55:15 +00:00
|
|
|
|
|
|
|
extra_args["page_size"] = page_size
|
2018-05-13 12:44:22 +00:00
|
|
|
else:
|
2018-06-30 14:51:57 +00:00
|
|
|
page_size = self.ds.page_size
|
2018-05-13 12:44:22 +00:00
|
|
|
|
2021-12-16 21:43:44 +00:00
|
|
|
# Facets are calculated against SQL without order by or limit
|
2021-07-15 00:05:18 +00:00
|
|
|
sql_no_order_no_limit = (
|
|
|
|
"select {select_all_columns} from {table_name} {where}".format(
|
2021-05-31 02:31:14 +00:00
|
|
|
select_all_columns=select_all_columns,
|
2022-04-26 20:56:27 +00:00
|
|
|
table_name=escape_sqlite(table_name),
|
2021-05-31 02:31:14 +00:00
|
|
|
where=where_clause,
|
|
|
|
)
|
|
|
|
)
|
2021-12-16 21:43:44 +00:00
|
|
|
|
|
|
|
# This is the SQL that populates the main table on the page
|
2021-05-31 02:31:14 +00:00
|
|
|
sql = "select {select_specified_columns} from {table_name} {where}{order_by} limit {page_size}{offset}".format(
|
|
|
|
select_specified_columns=select_specified_columns,
|
2022-04-26 20:56:27 +00:00
|
|
|
table_name=escape_sqlite(table_name),
|
2018-05-13 12:44:22 +00:00
|
|
|
where=where_clause,
|
|
|
|
order_by=order_by,
|
2021-05-31 02:31:14 +00:00
|
|
|
page_size=page_size + 1,
|
|
|
|
offset=offset,
|
Extract facet code out into a new plugin hook, closes #427 (#445)
Datasette previously only supported one type of faceting: exact column value counting.
With this change, faceting logic is extracted out into one or more separate classes which can implement other patterns of faceting - this is discussed in #427, but potential upcoming facet types include facet-by-date, facet-by-JSON-array, facet-by-many-2-many and more.
A new plugin hook, register_facet_classes, can be used by plugins to add in additional facet classes.
Each class must implement two methods: suggest(), which scans columns in the table to decide if they might be worth suggesting for faceting, and facet_results(), which executes the facet operation and returns results ready to be displayed in the UI.
2019-05-03 00:11:26 +00:00
|
|
|
)
|
2018-05-13 12:44:22 +00:00
|
|
|
|
2020-05-27 22:29:42 +00:00
|
|
|
if request.args.get("_timelimit"):
|
2020-05-29 22:57:46 +00:00
|
|
|
extra_args["custom_time_limit"] = int(request.args.get("_timelimit"))
|
2018-05-13 12:44:22 +00:00
|
|
|
|
2021-12-16 21:43:44 +00:00
|
|
|
# Execute the main query!
|
2020-02-14 02:20:05 +00:00
|
|
|
results = await db.execute(sql, params, truncate=True, **extra_args)
|
2018-05-13 12:44:22 +00:00
|
|
|
|
2021-12-16 21:43:44 +00:00
|
|
|
# Calculate the total count for this query
|
Extract facet code out into a new plugin hook, closes #427 (#445)
Datasette previously only supported one type of faceting: exact column value counting.
With this change, faceting logic is extracted out into one or more separate classes which can implement other patterns of faceting - this is discussed in #427, but potential upcoming facet types include facet-by-date, facet-by-JSON-array, facet-by-many-2-many and more.
A new plugin hook, register_facet_classes, can be used by plugins to add in additional facet classes.
Each class must implement two methods: suggest(), which scans columns in the table to decide if they might be worth suggesting for faceting, and facet_results(), which executes the facet operation and returns results ready to be displayed in the UI.
2019-05-03 00:11:26 +00:00
|
|
|
filtered_table_rows_count = None
|
2020-02-25 20:19:29 +00:00
|
|
|
if (
|
|
|
|
not db.is_mutable
|
|
|
|
and self.ds.inspect_data
|
2022-04-26 20:56:27 +00:00
|
|
|
and count_sql == f"select count(*) from {table_name} "
|
2020-02-25 20:19:29 +00:00
|
|
|
):
|
2021-12-16 21:43:44 +00:00
|
|
|
# We can use a previously cached table row count
|
2020-02-25 20:19:29 +00:00
|
|
|
try:
|
2022-04-26 20:56:27 +00:00
|
|
|
filtered_table_rows_count = self.ds.inspect_data[database_name][
|
|
|
|
"tables"
|
|
|
|
][table_name]["count"]
|
2020-02-25 20:19:29 +00:00
|
|
|
except KeyError:
|
|
|
|
pass
|
|
|
|
|
2021-12-16 21:43:44 +00:00
|
|
|
# Otherwise run a select count(*) ...
|
2021-06-02 02:53:00 +00:00
|
|
|
if count_sql and filtered_table_rows_count is None and not nocount:
|
2018-05-13 12:44:22 +00:00
|
|
|
try:
|
2020-02-14 02:20:05 +00:00
|
|
|
count_rows = list(await db.execute(count_sql, from_sql_params))
|
Extract facet code out into a new plugin hook, closes #427 (#445)
Datasette previously only supported one type of faceting: exact column value counting.
With this change, faceting logic is extracted out into one or more separate classes which can implement other patterns of faceting - this is discussed in #427, but potential upcoming facet types include facet-by-date, facet-by-JSON-array, facet-by-many-2-many and more.
A new plugin hook, register_facet_classes, can be used by plugins to add in additional facet classes.
Each class must implement two methods: suggest(), which scans columns in the table to decide if they might be worth suggesting for faceting, and facet_results(), which executes the facet operation and returns results ready to be displayed in the UI.
2019-05-03 00:11:26 +00:00
|
|
|
filtered_table_rows_count = count_rows[0][0]
|
2019-05-28 00:16:36 +00:00
|
|
|
except QueryInterrupted:
|
Extract facet code out into a new plugin hook, closes #427 (#445)
Datasette previously only supported one type of faceting: exact column value counting.
With this change, faceting logic is extracted out into one or more separate classes which can implement other patterns of faceting - this is discussed in #427, but potential upcoming facet types include facet-by-date, facet-by-JSON-array, facet-by-many-2-many and more.
A new plugin hook, register_facet_classes, can be used by plugins to add in additional facet classes.
Each class must implement two methods: suggest(), which scans columns in the table to decide if they might be worth suggesting for faceting, and facet_results(), which executes the facet operation and returns results ready to be displayed in the UI.
2019-05-03 00:11:26 +00:00
|
|
|
pass
|
|
|
|
|
2021-12-16 21:43:44 +00:00
|
|
|
# Faceting
|
2020-11-24 22:06:32 +00:00
|
|
|
if not self.ds.setting("allow_facet") and any(
|
2019-05-04 02:15:14 +00:00
|
|
|
arg.startswith("_facet") for arg in request.args
|
|
|
|
):
|
2020-10-29 22:01:38 +00:00
|
|
|
raise BadRequest("_facet= is not allowed")
|
2018-05-13 12:44:22 +00:00
|
|
|
|
Extract facet code out into a new plugin hook, closes #427 (#445)
Datasette previously only supported one type of faceting: exact column value counting.
With this change, faceting logic is extracted out into one or more separate classes which can implement other patterns of faceting - this is discussed in #427, but potential upcoming facet types include facet-by-date, facet-by-JSON-array, facet-by-many-2-many and more.
A new plugin hook, register_facet_classes, can be used by plugins to add in additional facet classes.
Each class must implement two methods: suggest(), which scans columns in the table to decide if they might be worth suggesting for faceting, and facet_results(), which executes the facet operation and returns results ready to be displayed in the UI.
2019-05-03 00:11:26 +00:00
|
|
|
# pylint: disable=no-member
|
|
|
|
facet_classes = list(
|
|
|
|
itertools.chain.from_iterable(pm.hook.register_facet_classes())
|
|
|
|
)
|
|
|
|
facet_results = {}
|
|
|
|
facets_timed_out = []
|
|
|
|
facet_instances = []
|
|
|
|
for klass in facet_classes:
|
2019-05-04 02:15:14 +00:00
|
|
|
facet_instances.append(
|
|
|
|
klass(
|
|
|
|
self.ds,
|
|
|
|
request,
|
2022-04-26 20:56:27 +00:00
|
|
|
database_name,
|
2021-07-15 00:05:18 +00:00
|
|
|
sql=sql_no_order_no_limit,
|
2019-05-04 02:15:14 +00:00
|
|
|
params=params,
|
2022-04-26 20:56:27 +00:00
|
|
|
table=table_name,
|
2019-05-04 02:15:14 +00:00
|
|
|
metadata=table_metadata,
|
|
|
|
row_count=filtered_table_rows_count,
|
|
|
|
)
|
|
|
|
)
|
Extract facet code out into a new plugin hook, closes #427 (#445)
Datasette previously only supported one type of faceting: exact column value counting.
With this change, faceting logic is extracted out into one or more separate classes which can implement other patterns of faceting - this is discussed in #427, but potential upcoming facet types include facet-by-date, facet-by-JSON-array, facet-by-many-2-many and more.
A new plugin hook, register_facet_classes, can be used by plugins to add in additional facet classes.
Each class must implement two methods: suggest(), which scans columns in the table to decide if they might be worth suggesting for faceting, and facet_results(), which executes the facet operation and returns results ready to be displayed in the UI.
2019-05-03 00:11:26 +00:00
|
|
|
|
2022-04-26 22:48:56 +00:00
|
|
|
async def execute_facets():
|
|
|
|
if not nofacet:
|
|
|
|
# Run them in parallel
|
|
|
|
facet_awaitables = [facet.facet_results() for facet in facet_instances]
|
|
|
|
facet_awaitable_results = await gather(*facet_awaitables)
|
|
|
|
for (
|
2021-05-31 02:39:14 +00:00
|
|
|
instance_facet_results,
|
|
|
|
instance_facets_timed_out,
|
2022-04-26 22:48:56 +00:00
|
|
|
) in facet_awaitable_results:
|
|
|
|
for facet_info in instance_facet_results:
|
|
|
|
base_key = facet_info["name"]
|
|
|
|
key = base_key
|
|
|
|
i = 1
|
|
|
|
while key in facet_results:
|
|
|
|
i += 1
|
|
|
|
key = f"{base_key}_{i}"
|
|
|
|
facet_results[key] = facet_info
|
|
|
|
facets_timed_out.extend(instance_facets_timed_out)
|
|
|
|
|
2021-12-16 21:43:44 +00:00
|
|
|
suggested_facets = []
|
2022-04-26 22:48:56 +00:00
|
|
|
|
|
|
|
async def execute_suggested_facets():
|
|
|
|
# Calculate suggested facets
|
|
|
|
if (
|
|
|
|
self.ds.setting("suggest_facets")
|
|
|
|
and self.ds.setting("allow_facet")
|
|
|
|
and not _next
|
|
|
|
and not nofacet
|
|
|
|
and not nosuggest
|
|
|
|
):
|
|
|
|
# Run them in parallel
|
|
|
|
facet_suggest_awaitables = [
|
|
|
|
facet.suggest() for facet in facet_instances
|
|
|
|
]
|
|
|
|
for suggest_result in await gather(*facet_suggest_awaitables):
|
|
|
|
suggested_facets.extend(suggest_result)
|
|
|
|
|
|
|
|
await gather(execute_facets(), execute_suggested_facets())
|
2021-12-16 21:43:44 +00:00
|
|
|
|
Extract facet code out into a new plugin hook, closes #427 (#445)
Datasette previously only supported one type of faceting: exact column value counting.
With this change, faceting logic is extracted out into one or more separate classes which can implement other patterns of faceting - this is discussed in #427, but potential upcoming facet types include facet-by-date, facet-by-JSON-array, facet-by-many-2-many and more.
A new plugin hook, register_facet_classes, can be used by plugins to add in additional facet classes.
Each class must implement two methods: suggest(), which scans columns in the table to decide if they might be worth suggesting for faceting, and facet_results(), which executes the facet operation and returns results ready to be displayed in the UI.
2019-05-03 00:11:26 +00:00
|
|
|
# Figure out columns and rows for the query
|
2018-05-25 00:15:37 +00:00
|
|
|
columns = [r[0] for r in results.description]
|
|
|
|
rows = list(results.rows)
|
2018-05-13 12:44:22 +00:00
|
|
|
|
?_labels= and ?_label=COL to expand foreign keys in JSON/CSV
These new querystring arguments can be used to request expanded foreign keys
in both JSON and CSV formats.
?_labels=on turns on expansions for ALL foreign key columns
?_label=COLUMN1&_label=COLUMN2 can be used to pick specific columns to expand
e.g. `Street_Tree_List.json?_label=qSpecies&_label=qLegalStatus`
{
"rowid": 233,
"TreeID": 121240,
"qLegalStatus": {
"value" 2,
"label": "Private"
}
"qSpecies": {
"value": 16,
"label": "Sycamore"
}
"qAddress": "91 Commonwealth Ave",
...
}
The labels option also works for the HTML and CSV views.
HTML defaults to `?_labels=on`, so if you pass `?_labels=off` you can disable
foreign key expansion entirely - or you can use `?_label=COLUMN` to request
just specific columns.
If you expand labels on CSV you get additional columns in the output:
`/Street_Tree_List.csv?_label=qLegalStatus`
rowid,TreeID,qLegalStatus,qLegalStatus_label...
1,141565,1,Permitted Site...
2,232565,2,Undocumented...
I also refactored the existing foreign key expansion code.
Closes #233. Refs #266.
2018-06-16 22:18:57 +00:00
|
|
|
# Expand labeled columns if requested
|
2018-06-17 22:56:55 +00:00
|
|
|
expanded_columns = []
|
2022-04-26 20:56:27 +00:00
|
|
|
expandable_columns = await self.expandable_columns(database_name, table_name)
|
?_labels= and ?_label=COL to expand foreign keys in JSON/CSV
These new querystring arguments can be used to request expanded foreign keys
in both JSON and CSV formats.
?_labels=on turns on expansions for ALL foreign key columns
?_label=COLUMN1&_label=COLUMN2 can be used to pick specific columns to expand
e.g. `Street_Tree_List.json?_label=qSpecies&_label=qLegalStatus`
{
"rowid": 233,
"TreeID": 121240,
"qLegalStatus": {
"value" 2,
"label": "Private"
}
"qSpecies": {
"value": 16,
"label": "Sycamore"
}
"qAddress": "91 Commonwealth Ave",
...
}
The labels option also works for the HTML and CSV views.
HTML defaults to `?_labels=on`, so if you pass `?_labels=off` you can disable
foreign key expansion entirely - or you can use `?_label=COLUMN` to request
just specific columns.
If you expand labels on CSV you get additional columns in the output:
`/Street_Tree_List.csv?_label=qLegalStatus`
rowid,TreeID,qLegalStatus,qLegalStatus_label...
1,141565,1,Permitted Site...
2,232565,2,Undocumented...
I also refactored the existing foreign key expansion code.
Closes #233. Refs #266.
2018-06-16 22:18:57 +00:00
|
|
|
columns_to_expand = None
|
|
|
|
try:
|
2021-12-22 20:22:44 +00:00
|
|
|
all_labels = value_as_boolean(request.args.get("_labels", ""))
|
?_labels= and ?_label=COL to expand foreign keys in JSON/CSV
These new querystring arguments can be used to request expanded foreign keys
in both JSON and CSV formats.
?_labels=on turns on expansions for ALL foreign key columns
?_label=COLUMN1&_label=COLUMN2 can be used to pick specific columns to expand
e.g. `Street_Tree_List.json?_label=qSpecies&_label=qLegalStatus`
{
"rowid": 233,
"TreeID": 121240,
"qLegalStatus": {
"value" 2,
"label": "Private"
}
"qSpecies": {
"value": 16,
"label": "Sycamore"
}
"qAddress": "91 Commonwealth Ave",
...
}
The labels option also works for the HTML and CSV views.
HTML defaults to `?_labels=on`, so if you pass `?_labels=off` you can disable
foreign key expansion entirely - or you can use `?_label=COLUMN` to request
just specific columns.
If you expand labels on CSV you get additional columns in the output:
`/Street_Tree_List.csv?_label=qLegalStatus`
rowid,TreeID,qLegalStatus,qLegalStatus_label...
1,141565,1,Permitted Site...
2,232565,2,Undocumented...
I also refactored the existing foreign key expansion code.
Closes #233. Refs #266.
2018-06-16 22:18:57 +00:00
|
|
|
except ValueError:
|
|
|
|
all_labels = default_labels
|
|
|
|
# Check for explicit _label=
|
|
|
|
if "_label" in request.args:
|
2020-05-29 22:57:46 +00:00
|
|
|
columns_to_expand = request.args.getlist("_label")
|
?_labels= and ?_label=COL to expand foreign keys in JSON/CSV
These new querystring arguments can be used to request expanded foreign keys
in both JSON and CSV formats.
?_labels=on turns on expansions for ALL foreign key columns
?_label=COLUMN1&_label=COLUMN2 can be used to pick specific columns to expand
e.g. `Street_Tree_List.json?_label=qSpecies&_label=qLegalStatus`
{
"rowid": 233,
"TreeID": 121240,
"qLegalStatus": {
"value" 2,
"label": "Private"
}
"qSpecies": {
"value": 16,
"label": "Sycamore"
}
"qAddress": "91 Commonwealth Ave",
...
}
The labels option also works for the HTML and CSV views.
HTML defaults to `?_labels=on`, so if you pass `?_labels=off` you can disable
foreign key expansion entirely - or you can use `?_label=COLUMN` to request
just specific columns.
If you expand labels on CSV you get additional columns in the output:
`/Street_Tree_List.csv?_label=qLegalStatus`
rowid,TreeID,qLegalStatus,qLegalStatus_label...
1,141565,1,Permitted Site...
2,232565,2,Undocumented...
I also refactored the existing foreign key expansion code.
Closes #233. Refs #266.
2018-06-16 22:18:57 +00:00
|
|
|
if columns_to_expand is None and all_labels:
|
|
|
|
# expand all columns with foreign keys
|
2019-05-04 02:15:14 +00:00
|
|
|
columns_to_expand = [fk["column"] for fk, _ in expandable_columns]
|
?_labels= and ?_label=COL to expand foreign keys in JSON/CSV
These new querystring arguments can be used to request expanded foreign keys
in both JSON and CSV formats.
?_labels=on turns on expansions for ALL foreign key columns
?_label=COLUMN1&_label=COLUMN2 can be used to pick specific columns to expand
e.g. `Street_Tree_List.json?_label=qSpecies&_label=qLegalStatus`
{
"rowid": 233,
"TreeID": 121240,
"qLegalStatus": {
"value" 2,
"label": "Private"
}
"qSpecies": {
"value": 16,
"label": "Sycamore"
}
"qAddress": "91 Commonwealth Ave",
...
}
The labels option also works for the HTML and CSV views.
HTML defaults to `?_labels=on`, so if you pass `?_labels=off` you can disable
foreign key expansion entirely - or you can use `?_label=COLUMN` to request
just specific columns.
If you expand labels on CSV you get additional columns in the output:
`/Street_Tree_List.csv?_label=qLegalStatus`
rowid,TreeID,qLegalStatus,qLegalStatus_label...
1,141565,1,Permitted Site...
2,232565,2,Undocumented...
I also refactored the existing foreign key expansion code.
Closes #233. Refs #266.
2018-06-16 22:18:57 +00:00
|
|
|
|
|
|
|
if columns_to_expand:
|
|
|
|
expanded_labels = {}
|
2019-04-07 03:11:08 +00:00
|
|
|
for fk, _ in expandable_columns:
|
?_labels= and ?_label=COL to expand foreign keys in JSON/CSV
These new querystring arguments can be used to request expanded foreign keys
in both JSON and CSV formats.
?_labels=on turns on expansions for ALL foreign key columns
?_label=COLUMN1&_label=COLUMN2 can be used to pick specific columns to expand
e.g. `Street_Tree_List.json?_label=qSpecies&_label=qLegalStatus`
{
"rowid": 233,
"TreeID": 121240,
"qLegalStatus": {
"value" 2,
"label": "Private"
}
"qSpecies": {
"value": 16,
"label": "Sycamore"
}
"qAddress": "91 Commonwealth Ave",
...
}
The labels option also works for the HTML and CSV views.
HTML defaults to `?_labels=on`, so if you pass `?_labels=off` you can disable
foreign key expansion entirely - or you can use `?_label=COLUMN` to request
just specific columns.
If you expand labels on CSV you get additional columns in the output:
`/Street_Tree_List.csv?_label=qLegalStatus`
rowid,TreeID,qLegalStatus,qLegalStatus_label...
1,141565,1,Permitted Site...
2,232565,2,Undocumented...
I also refactored the existing foreign key expansion code.
Closes #233. Refs #266.
2018-06-16 22:18:57 +00:00
|
|
|
column = fk["column"]
|
|
|
|
if column not in columns_to_expand:
|
|
|
|
continue
|
2021-05-27 04:17:43 +00:00
|
|
|
if column not in columns:
|
|
|
|
continue
|
2018-06-17 22:56:55 +00:00
|
|
|
expanded_columns.append(column)
|
?_labels= and ?_label=COL to expand foreign keys in JSON/CSV
These new querystring arguments can be used to request expanded foreign keys
in both JSON and CSV formats.
?_labels=on turns on expansions for ALL foreign key columns
?_label=COLUMN1&_label=COLUMN2 can be used to pick specific columns to expand
e.g. `Street_Tree_List.json?_label=qSpecies&_label=qLegalStatus`
{
"rowid": 233,
"TreeID": 121240,
"qLegalStatus": {
"value" 2,
"label": "Private"
}
"qSpecies": {
"value": 16,
"label": "Sycamore"
}
"qAddress": "91 Commonwealth Ave",
...
}
The labels option also works for the HTML and CSV views.
HTML defaults to `?_labels=on`, so if you pass `?_labels=off` you can disable
foreign key expansion entirely - or you can use `?_label=COLUMN` to request
just specific columns.
If you expand labels on CSV you get additional columns in the output:
`/Street_Tree_List.csv?_label=qLegalStatus`
rowid,TreeID,qLegalStatus,qLegalStatus_label...
1,141565,1,Permitted Site...
2,232565,2,Undocumented...
I also refactored the existing foreign key expansion code.
Closes #233. Refs #266.
2018-06-16 22:18:57 +00:00
|
|
|
# Gather the values
|
|
|
|
column_index = columns.index(column)
|
|
|
|
values = [row[column_index] for row in rows]
|
|
|
|
# Expand them
|
2019-05-04 02:15:14 +00:00
|
|
|
expanded_labels.update(
|
2022-04-26 20:56:27 +00:00
|
|
|
await self.ds.expand_foreign_keys(
|
|
|
|
database_name, table_name, column, values
|
|
|
|
)
|
2019-05-04 02:15:14 +00:00
|
|
|
)
|
?_labels= and ?_label=COL to expand foreign keys in JSON/CSV
These new querystring arguments can be used to request expanded foreign keys
in both JSON and CSV formats.
?_labels=on turns on expansions for ALL foreign key columns
?_label=COLUMN1&_label=COLUMN2 can be used to pick specific columns to expand
e.g. `Street_Tree_List.json?_label=qSpecies&_label=qLegalStatus`
{
"rowid": 233,
"TreeID": 121240,
"qLegalStatus": {
"value" 2,
"label": "Private"
}
"qSpecies": {
"value": 16,
"label": "Sycamore"
}
"qAddress": "91 Commonwealth Ave",
...
}
The labels option also works for the HTML and CSV views.
HTML defaults to `?_labels=on`, so if you pass `?_labels=off` you can disable
foreign key expansion entirely - or you can use `?_label=COLUMN` to request
just specific columns.
If you expand labels on CSV you get additional columns in the output:
`/Street_Tree_List.csv?_label=qLegalStatus`
rowid,TreeID,qLegalStatus,qLegalStatus_label...
1,141565,1,Permitted Site...
2,232565,2,Undocumented...
I also refactored the existing foreign key expansion code.
Closes #233. Refs #266.
2018-06-16 22:18:57 +00:00
|
|
|
if expanded_labels:
|
|
|
|
# Rewrite the rows
|
|
|
|
new_rows = []
|
|
|
|
for row in rows:
|
|
|
|
new_row = CustomRow(columns)
|
|
|
|
for column in row.keys():
|
|
|
|
value = row[column]
|
2019-11-02 22:29:40 +00:00
|
|
|
if (column, value) in expanded_labels and value is not None:
|
?_labels= and ?_label=COL to expand foreign keys in JSON/CSV
These new querystring arguments can be used to request expanded foreign keys
in both JSON and CSV formats.
?_labels=on turns on expansions for ALL foreign key columns
?_label=COLUMN1&_label=COLUMN2 can be used to pick specific columns to expand
e.g. `Street_Tree_List.json?_label=qSpecies&_label=qLegalStatus`
{
"rowid": 233,
"TreeID": 121240,
"qLegalStatus": {
"value" 2,
"label": "Private"
}
"qSpecies": {
"value": 16,
"label": "Sycamore"
}
"qAddress": "91 Commonwealth Ave",
...
}
The labels option also works for the HTML and CSV views.
HTML defaults to `?_labels=on`, so if you pass `?_labels=off` you can disable
foreign key expansion entirely - or you can use `?_label=COLUMN` to request
just specific columns.
If you expand labels on CSV you get additional columns in the output:
`/Street_Tree_List.csv?_label=qLegalStatus`
rowid,TreeID,qLegalStatus,qLegalStatus_label...
1,141565,1,Permitted Site...
2,232565,2,Undocumented...
I also refactored the existing foreign key expansion code.
Closes #233. Refs #266.
2018-06-16 22:18:57 +00:00
|
|
|
new_row[column] = {
|
2019-05-04 02:15:14 +00:00
|
|
|
"value": value,
|
|
|
|
"label": expanded_labels[(column, value)],
|
?_labels= and ?_label=COL to expand foreign keys in JSON/CSV
These new querystring arguments can be used to request expanded foreign keys
in both JSON and CSV formats.
?_labels=on turns on expansions for ALL foreign key columns
?_label=COLUMN1&_label=COLUMN2 can be used to pick specific columns to expand
e.g. `Street_Tree_List.json?_label=qSpecies&_label=qLegalStatus`
{
"rowid": 233,
"TreeID": 121240,
"qLegalStatus": {
"value" 2,
"label": "Private"
}
"qSpecies": {
"value": 16,
"label": "Sycamore"
}
"qAddress": "91 Commonwealth Ave",
...
}
The labels option also works for the HTML and CSV views.
HTML defaults to `?_labels=on`, so if you pass `?_labels=off` you can disable
foreign key expansion entirely - or you can use `?_label=COLUMN` to request
just specific columns.
If you expand labels on CSV you get additional columns in the output:
`/Street_Tree_List.csv?_label=qLegalStatus`
rowid,TreeID,qLegalStatus,qLegalStatus_label...
1,141565,1,Permitted Site...
2,232565,2,Undocumented...
I also refactored the existing foreign key expansion code.
Closes #233. Refs #266.
2018-06-16 22:18:57 +00:00
|
|
|
}
|
|
|
|
else:
|
|
|
|
new_row[column] = value
|
|
|
|
new_rows.append(new_row)
|
|
|
|
rows = new_rows
|
|
|
|
|
2018-05-13 12:44:22 +00:00
|
|
|
# Pagination next link
|
|
|
|
next_value = None
|
|
|
|
next_url = None
|
2020-12-23 17:04:32 +00:00
|
|
|
if 0 < page_size < len(rows):
|
2018-05-13 12:44:22 +00:00
|
|
|
if is_view:
|
|
|
|
next_value = int(_next or 0) + page_size
|
|
|
|
else:
|
|
|
|
next_value = path_from_row_pks(rows[-2], pks, use_rowid)
|
|
|
|
# If there's a sort or sort_desc, add that value as a prefix
|
|
|
|
if (sort or sort_desc) and not is_view:
|
2022-08-14 15:44:02 +00:00
|
|
|
try:
|
|
|
|
prefix = rows[-2][sort or sort_desc]
|
|
|
|
except IndexError:
|
|
|
|
# sort/sort_desc column missing from SELECT - look up value by PK instead
|
|
|
|
prefix_where_clause = " and ".join(
|
|
|
|
"[{}] = :pk{}".format(pk, i) for i, pk in enumerate(pks)
|
|
|
|
)
|
|
|
|
prefix_lookup_sql = "select [{}] from [{}] where {}".format(
|
|
|
|
sort or sort_desc, table_name, prefix_where_clause
|
|
|
|
)
|
|
|
|
prefix = (
|
|
|
|
await db.execute(
|
|
|
|
prefix_lookup_sql,
|
|
|
|
{
|
|
|
|
**{
|
|
|
|
"pk{}".format(i): rows[-2][pk]
|
|
|
|
for i, pk in enumerate(pks)
|
|
|
|
}
|
|
|
|
},
|
|
|
|
)
|
|
|
|
).single_value()
|
2019-05-28 02:23:18 +00:00
|
|
|
if isinstance(prefix, dict) and "value" in prefix:
|
|
|
|
prefix = prefix["value"]
|
2018-05-13 12:44:22 +00:00
|
|
|
if prefix is None:
|
2018-05-13 12:55:15 +00:00
|
|
|
prefix = "$null"
|
2018-05-13 12:44:22 +00:00
|
|
|
else:
|
2022-03-15 18:01:57 +00:00
|
|
|
prefix = tilde_encode(str(prefix))
|
2020-11-15 23:24:22 +00:00
|
|
|
next_value = f"{prefix},{next_value}"
|
2018-05-13 12:55:15 +00:00
|
|
|
added_args = {"_next": next_value}
|
2018-05-13 12:44:22 +00:00
|
|
|
if sort:
|
2018-05-13 12:55:15 +00:00
|
|
|
added_args["_sort"] = sort
|
2018-05-13 12:44:22 +00:00
|
|
|
else:
|
2018-05-13 12:55:15 +00:00
|
|
|
added_args["_sort_desc"] = sort_desc
|
2018-05-13 12:44:22 +00:00
|
|
|
else:
|
2018-05-13 12:55:15 +00:00
|
|
|
added_args = {"_next": next_value}
|
2019-04-13 19:16:05 +00:00
|
|
|
next_url = self.ds.absolute_url(
|
2021-11-20 19:03:08 +00:00
|
|
|
request, self.ds.urls.path(path_with_replaced_args(request, added_args))
|
2018-05-13 12:55:15 +00:00
|
|
|
)
|
2018-05-13 12:44:22 +00:00
|
|
|
rows = rows[:page_size]
|
|
|
|
|
|
|
|
# human_description_en combines filters AND search, if provided
|
2019-05-23 05:44:34 +00:00
|
|
|
human_description_en = filters.human_description_en(
|
|
|
|
extra=extra_human_descriptions
|
|
|
|
)
|
2018-05-13 12:44:22 +00:00
|
|
|
|
|
|
|
if sort or sort_desc:
|
2018-05-13 12:55:15 +00:00
|
|
|
sorted_by = "sorted by {}{}".format(
|
|
|
|
(sort or sort_desc), " descending" if sort_desc else ""
|
|
|
|
)
|
|
|
|
human_description_en = " ".join(
|
|
|
|
[b for b in [human_description_en, sorted_by] if b]
|
2018-05-13 12:44:22 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
        async def extra_template():
            """Build the extra template context for the HTML table page.

            Only invoked when rendering HTML (not the JSON/CSV code path).
            Closes over the surrounding request/query state; ``nonlocal sort``
            lets this function default the sort indicator to the table's
            single primary key (or rowid) when no sort was specified.
            """
            nonlocal sort

            # Resolve the columns/rows into display-ready structures
            # (foreign key links, truncation, sortability flags etc.)
            display_columns, display_rows = await display_columns_and_rows(
                self.ds,
                database_name,
                table_name,
                results.description,
                rows,
                link_column=not is_view,
                truncate_cells=self.ds.setting("truncate_cells_html"),
                sortable_columns=await self.sortable_columns_for_table(
                    database_name, table_name, use_rowid=True
                ),
            )
            # Per-table metadata, falling back to {} at each level of nesting
            metadata = (
                (self.ds.metadata("databases") or {})
                .get(database_name, {})
                .get("tables", {})
                .get(table_name, {})
            )
            self.ds.update_with_inherited_metadata(metadata)

            # Preserve extra querystring arguments as hidden form fields so
            # submitting the filter form does not drop them. _sort/_search/
            # _next are excluded (the form manages those itself), as are
            # double-underscore operator args like _facet__array.
            form_hidden_args = []
            for key in request.args:
                if (
                    key.startswith("_")
                    and key not in ("_sort", "_sort_desc", "_search", "_next")
                    and "__" not in key
                ):
                    for value in request.args.getlist(key):
                        form_hidden_args.append((key, value))

            # if no sort specified AND table has a single primary key,
            # set sort to that so arrow is displayed
            if not sort and not sort_desc:
                if 1 == len(pks):
                    sort = pks[0]
                elif use_rowid:
                    sort = "rowid"

            async def table_actions():
                """Collect menu links contributed by table_actions plugin hooks."""
                links = []
                for hook in pm.hook.table_actions(
                    datasette=self.ds,
                    table=table_name,
                    database=database_name,
                    actor=request.actor,
                    request=request,
                ):
                    # Hooks may be sync or async - await_me_maybe handles both
                    extra_links = await await_me_maybe(hook)
                    if extra_links:
                        links.extend(extra_links)
                return links

            # filter_columns combine the columns we know are available
            # in the table with any additional columns (such as rowid)
            # which are available in the query
            filter_columns = list(columns) + [
                table_column
                for table_column in table_columns
                if table_column not in columns
            ]
            d = {
                "table_actions": table_actions,
                "use_rowid": use_rowid,
                "filters": filters,
                "display_columns": display_columns,
                "filter_columns": filter_columns,
                "display_rows": display_rows,
                "facets_timed_out": facets_timed_out,
                # Facets with the most results first; ties broken by name
                "sorted_facet_results": sorted(
                    facet_results.values(),
                    key=lambda f: (len(f["results"]), f["name"]),
                    reverse=True,
                ),
                "form_hidden_args": form_hidden_args,
                "is_sortable": any(c["sortable"] for c in display_columns),
                "fix_path": self.ds.urls.path,
                "path_with_replaced_args": path_with_replaced_args,
                "path_with_removed_args": path_with_removed_args,
                "append_querystring": append_querystring,
                "request": request,
                "sort": sort,
                "sort_desc": sort_desc,
                "disable_sort": is_view,
                # Template candidates, most specific first: per-table override,
                # then the legacy per-table name, then the generic _table.html
                "custom_table_templates": [
                    f"_table-{to_css_class(database_name)}-{to_css_class(table_name)}.html",
                    f"_table-table-{to_css_class(database_name)}-{to_css_class(table_name)}.html",
                    "_table.html",
                ],
                "metadata": metadata,
                "view_definition": await db.get_view_definition(table_name),
                "table_definition": await db.get_table_definition(table_name),
                "datasette_allow_facet": "true"
                if self.ds.setting("allow_facet")
                else "false",
            }
            # Filters plugin hooks may contribute additional context keys
            d.update(extra_context_from_filters)
            return d
|
2018-05-13 12:44:22 +00:00
|
|
|
|
2019-05-04 02:15:14 +00:00
|
|
|
return (
|
|
|
|
{
|
2022-04-26 20:56:27 +00:00
|
|
|
"database": database_name,
|
|
|
|
"table": table_name,
|
2019-05-04 02:15:14 +00:00
|
|
|
"is_view": is_view,
|
|
|
|
"human_description_en": human_description_en,
|
|
|
|
"rows": rows[:page_size],
|
|
|
|
"truncated": results.truncated,
|
|
|
|
"filtered_table_rows_count": filtered_table_rows_count,
|
|
|
|
"expanded_columns": expanded_columns,
|
|
|
|
"expandable_columns": expandable_columns,
|
|
|
|
"columns": columns,
|
|
|
|
"primary_keys": pks,
|
|
|
|
"units": units,
|
|
|
|
"query": {"sql": sql, "params": params},
|
|
|
|
"facet_results": facet_results,
|
|
|
|
"suggested_facets": suggested_facets,
|
|
|
|
"next": next_value and str(next_value) or None,
|
|
|
|
"next_url": next_url,
|
2020-06-08 18:07:11 +00:00
|
|
|
"private": private,
|
2020-06-09 00:05:44 +00:00
|
|
|
"allow_execute_sql": await self.ds.permission_allowed(
|
2022-04-26 20:56:27 +00:00
|
|
|
request.actor, "execute-sql", database_name, default=True
|
2020-06-09 00:05:44 +00:00
|
|
|
),
|
2019-05-04 02:15:14 +00:00
|
|
|
},
|
|
|
|
extra_template,
|
|
|
|
(
|
2022-04-26 20:56:27 +00:00
|
|
|
f"table-{to_css_class(database_name)}-{to_css_class(table_name)}.html",
|
2019-05-04 02:15:14 +00:00
|
|
|
"table.html",
|
|
|
|
),
|
2018-05-13 12:44:22 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
|
2022-04-25 18:33:35 +00:00
|
|
|
async def display_columns_and_rows(
    datasette,
    database_name,
    table_name,
    description,
    rows,
    link_column=False,
    truncate_cells=0,
    sortable_columns=None,
):
    """Returns columns, rows for specified table - including fancy foreign key treatment

    ``description`` is a DB-API style cursor description (sequence of tuples whose
    first element is the column name); ``rows`` are the result rows to render.
    Returns a ``(columns, cell_rows)`` pair where ``columns`` is a list of dicts
    describing each column (name/sortable/is_pk/type/notnull/description) and
    ``cell_rows`` is a list of ``Row`` objects whose cells carry both the raw value
    and an HTML-safe display value.
    """
    sortable_columns = sortable_columns or set()
    db = datasette.databases[database_name]
    table_metadata = datasette.table_metadata(database_name, table_name)
    # Optional per-column description strings from table metadata
    column_descriptions = table_metadata.get("columns") or {}
    # Map of column name -> column detail object (provides .type and .notnull)
    column_details = {
        col.name: col for col in await db.table_column_details(table_name)
    }
    pks = await db.primary_keys(table_name)
    # Rowid tables have no declared primary key - display "rowid" instead
    pks_for_display = pks
    if not pks_for_display:
        pks_for_display = ["rowid"]

    # Build the column metadata dicts in the order the query returned them
    columns = []
    for r in description:
        # "rowid" may appear in results without being a declared column
        if r[0] == "rowid" and "rowid" not in column_details:
            type_ = "integer"
            notnull = 0
        else:
            type_ = column_details[r[0]].type
            notnull = column_details[r[0]].notnull
        columns.append(
            {
                "name": r[0],
                "sortable": r[0] in sortable_columns,
                "is_pk": r[0] in pks_for_display,
                "type": type_,
                "notnull": notnull,
                "description": column_descriptions.get(r[0]),
            }
        )

    # Used to turn expanded foreign key cells into links to the other table
    column_to_foreign_key_table = {
        fk["column"]: fk["other_table"]
        for fk in await db.foreign_keys_for_table(table_name)
    }

    cell_rows = []
    base_url = datasette.setting("base_url")
    for row in rows:
        cells = []
        # Unless we are a view, the first column is a link - either to the rowid
        # or to the simple or compound primary key
        if link_column:
            # A "special" link column is shown when there is no single-column pk
            is_special_link_column = len(pks) != 1
            pk_path = path_from_row_pks(row, pks, not pks, False)
            cells.append(
                {
                    "column": pks[0] if len(pks) == 1 else "Link",
                    "value_type": "pk",
                    "is_special_link_column": is_special_link_column,
                    "raw": pk_path,
                    # NOTE(review): base_url is passed to .format() but the
                    # template has no {base_url} placeholder - it is ignored
                    "value": markupsafe.Markup(
                        '<a href="{table_path}/{flat_pks_quoted}">{flat_pks}</a>'.format(
                            base_url=base_url,
                            table_path=datasette.urls.table(database_name, table_name),
                            flat_pks=str(markupsafe.escape(pk_path)),
                            flat_pks_quoted=path_from_row_pks(row, pks, not pks),
                        )
                    ),
                }
            )

        for value, column_dict in zip(row, columns):
            column = column_dict["name"]
            if link_column and len(pks) == 1 and column == pks[0]:
                # If there's a simple primary key, don't repeat the value as it's
                # already shown in the link column.
                continue

            # First let the plugins have a go
            # pylint: disable=no-member
            plugin_display_value = None
            for candidate in pm.hook.render_cell(
                row=row,
                value=value,
                column=column,
                table=table_name,
                database=database_name,
                datasette=datasette,
            ):
                # Hook implementations may be sync or async
                candidate = await await_me_maybe(candidate)
                if candidate is not None:
                    plugin_display_value = candidate
                    break
            if plugin_display_value:
                display_value = plugin_display_value
            elif isinstance(value, bytes):
                # BLOB values render as a download link showing the byte count
                formatted = format_bytes(len(value))
                display_value = markupsafe.Markup(
                    '<a class="blob-download" href="{}"{}>&lt;Binary:&nbsp;{:,}&nbsp;byte{}&gt;</a>'.format(
                        datasette.urls.row_blob(
                            database_name,
                            table_name,
                            path_from_row_pks(row, pks, not pks),
                            column,
                        ),
                        # Only add a title tooltip when format_bytes produced a
                        # unit other than plain "bytes"
                        ' title="{}"'.format(formatted)
                        if "bytes" not in formatted
                        else "",
                        len(value),
                        "" if len(value) == 1 else "s",
                    )
                )
            elif isinstance(value, dict):
                # It's an expanded foreign key - display link to other row
                label = value["label"]
                value = value["value"]
                # The table we link to depends on the column
                other_table = column_to_foreign_key_table[column]
                link_template = LINK_WITH_LABEL if (label != value) else LINK_WITH_VALUE
                display_value = markupsafe.Markup(
                    link_template.format(
                        database=database_name,
                        base_url=base_url,
                        table=tilde_encode(other_table),
                        link_id=tilde_encode(str(value)),
                        id=str(markupsafe.escape(value)),
                        label=str(markupsafe.escape(label)) or "-",
                    )
                )
            elif value in ("", None):
                # Empty cells render as a non-breaking space so table cells
                # keep their borders
                display_value = markupsafe.Markup("&nbsp;")
            elif is_url(str(value).strip()):
                # Render URL values as clickable (possibly truncated) links
                display_value = markupsafe.Markup(
                    '<a href="{url}">{truncated_url}</a>'.format(
                        url=markupsafe.escape(value.strip()),
                        truncated_url=markupsafe.escape(
                            truncate_url(value.strip(), truncate_cells)
                        ),
                    )
                )
            elif column in table_metadata.get("units", {}) and value != "":
                # Interpret units using pint
                value = value * ureg(table_metadata["units"][column])
                # Pint uses floating point which sometimes introduces errors in the compact
                # representation, which we have to round off to avoid ugliness. In the vast
                # majority of cases this rounding will be inconsequential. I hope.
                value = round(value.to_compact(), 6)
                display_value = markupsafe.Markup(f"{value:~P}".replace(" ", "&nbsp;"))
            else:
                display_value = str(value)
                # Truncate long plain-text cells, appending an ellipsis
                if truncate_cells and len(display_value) > truncate_cells:
                    display_value = display_value[:truncate_cells] + "\u2026"

            cells.append(
                {
                    "column": column,
                    "value": display_value,
                    "raw": value,
                    "value_type": "none"
                    if value is None
                    else str(type(value).__name__),
                }
            )
        cell_rows.append(Row(cells))

    if link_column:
        # Add the link column header.
        # If it's a simple primary key, we have to remove and re-add that column name at
        # the beginning of the header row.
        first_column = None
        if len(pks) == 1:
            columns = [col for col in columns if col["name"] != pks[0]]
            first_column = {
                "name": pks[0],
                "sortable": len(pks) == 1,
                "is_pk": True,
                "type": column_details[pks[0]].type,
                "notnull": column_details[pks[0]].notnull,
            }
        else:
            first_column = {
                "name": "Link",
                "sortable": False,
                "is_pk": False,
                "type": "",
                "notnull": 0,
            }
        columns = [first_column] + columns
    return columns, cell_rows
|
2022-10-27 20:17:18 +00:00
|
|
|
|
|
|
|
|
|
|
|
class TableInsertView(BaseView):
    """Handles POST /database/table/-/insert - insert rows via the JSON API.

    Accepts a JSON body containing either ``"row"`` (a single dictionary) or
    ``"rows"`` (a list of dictionaries), plus optional ``"return"``,
    ``"ignore"`` and ``"replace"`` flags.
    """

    name = "table-insert"

    def __init__(self, datasette):
        self.ds = datasette

    async def _validate_data(self, request, db, table_name):
        """Parse and validate the request body.

        Returns a ``(rows, errors, extras)`` tuple: on success ``rows`` is the
        list of row dictionaries to insert and ``extras`` holds the remaining
        parameters; on any validation failure ``rows`` is None and ``errors``
        lists the problems.
        """
        errors = []

        def _errors(errors):
            # Shape every failure as the same (None, errors, {}) triple
            return None, errors, {}

        if request.headers.get("content-type") != "application/json":
            # TODO: handle form-encoded data
            return _errors(["Invalid content-type, must be application/json"])
        body = await request.post_body()
        try:
            data = json.loads(body)
        except json.JSONDecodeError as e:
            return _errors(["Invalid JSON: {}".format(e)])
        if not isinstance(data, dict):
            return _errors(["JSON must be a dictionary"])
        keys = data.keys()

        # keys must contain "row" or "rows"
        if "row" not in keys and "rows" not in keys:
            return _errors(['JSON must have one or other of "row" or "rows"'])
        rows = []
        if "row" in keys:
            if "rows" in keys:
                return _errors(['Cannot use "row" and "rows" at the same time'])
            row = data["row"]
            if not isinstance(row, dict):
                return _errors(['"row" must be a dictionary'])
            rows = [row]
            # Single-row inserts always return the inserted row
            data["return"] = True
        else:
            rows = data["rows"]
            if not isinstance(rows, list):
                return _errors(['"rows" must be a list'])
            for row in rows:
                if not isinstance(row, dict):
                    return _errors(['"rows" must be a list of dictionaries'])

        # Does this exceed max_insert_rows?
        max_insert_rows = self.ds.setting("max_insert_rows")
        if len(rows) > max_insert_rows:
            return _errors(
                ["Too many rows, maximum allowed is {}".format(max_insert_rows)]
            )

        # Validate other parameters
        extras = {
            key: value for key, value in data.items() if key not in ("row", "rows")
        }
        valid_extras = {"return", "ignore", "replace"}
        invalid_extras = extras.keys() - valid_extras
        if invalid_extras:
            return _errors(
                ['Invalid parameter: "{}"'.format('", "'.join(sorted(invalid_extras)))]
            )
        if extras.get("ignore") and extras.get("replace"):
            return _errors(['Cannot use "ignore" and "replace" at the same time'])

        # Validate columns of each row - collect ALL bad rows before failing
        columns = set(await db.table_columns(table_name))
        for i, row in enumerate(rows):
            invalid_columns = set(row.keys()) - columns
            if invalid_columns:
                errors.append(
                    "Row {} has invalid columns: {}".format(
                        i, ", ".join(sorted(invalid_columns))
                    )
                )
        if errors:
            return _errors(errors)
        return rows, errors, extras

    async def post(self, request):
        """Insert the validated rows, returning a JSON Response.

        Responds 404 if the table cannot be resolved or does not exist,
        403 without insert-row permission, 400 on validation or write
        errors, and 201 with ``{"ok": True}`` (plus ``"rows"`` when
        requested) on success.
        """
        try:
            resolved = await self.ds.resolve_table(request)
        except NotFound as e:
            return _error([e.args[0]], 404)
        db = resolved.db
        database_name = db.name
        table_name = resolved.table

        # Table must exist (may handle table creation in the future).
        # Note: resolved.db is already the database object - no need to
        # re-fetch it via self.ds.get_database()
        if not await db.table_exists(table_name):
            return _error(["Table not found: {}".format(table_name)], 404)
        # Must have insert-row permission
        if not await self.ds.permission_allowed(
            request.actor, "insert-row", resource=(database_name, table_name)
        ):
            return _error(["Permission denied"], 403)
        rows, errors, extras = await self._validate_data(request, db, table_name)
        if errors:
            return _error(errors, 400)

        ignore = extras.get("ignore")
        replace = extras.get("replace")

        should_return = bool(extras.get("return", False))

        # Insert rows - runs on the database's write thread
        def insert_rows(conn):
            table = sqlite_utils.Database(conn)[table_name]
            if should_return:
                # Insert one at a time so we can capture each rowid, then
                # fetch the inserted rows back to include in the response
                rowids = []
                for row in rows:
                    rowids.append(
                        table.insert(row, ignore=ignore, replace=replace).last_rowid
                    )
                return list(
                    table.rows_where(
                        "rowid in ({})".format(",".join("?" for _ in rowids)),
                        rowids,
                    )
                )
            else:
                table.insert_all(rows, ignore=ignore, replace=replace)

        try:
            rows = await db.execute_write_fn(insert_rows)
        except Exception as e:
            # Surface SQLite/sqlite-utils errors (e.g. constraint failures)
            return _error([str(e)])
        result = {"ok": True}
        if should_return:
            result["rows"] = rows
        return Response.json(result, status=201)
|
2022-10-30 22:17:21 +00:00
|
|
|
|
|
|
|
|
|
|
|
class TableDropView(BaseView):
    """Handles POST /database/table/-/drop - drop a table via the JSON API.

    The first POST (without ``"confirm": true``) responds with the table's
    row count and a confirmation message; POSTing again with
    ``{"confirm": true}`` actually drops the table.
    """

    name = "table-drop"

    def __init__(self, datasette):
        self.ds = datasette

    async def post(self, request):
        """Drop the resolved table, or ask for confirmation.

        Responds 404 if the table cannot be resolved or does not exist,
        403 without drop-table permission or for an immutable database,
        200 with confirmation details when not yet confirmed, and
        200 with ``{"ok": True}`` once the table has been dropped.
        """
        try:
            resolved = await self.ds.resolve_table(request)
        except NotFound as e:
            return _error([e.args[0]], 404)
        db = resolved.db
        database_name = db.name
        table_name = resolved.table
        # Table must exist.
        # Note: resolved.db is already the database object - no need to
        # re-fetch it via self.ds.get_database()
        if not await db.table_exists(table_name):
            return _error(["Table not found: {}".format(table_name)], 404)
        if not await self.ds.permission_allowed(
            request.actor, "drop-table", resource=(database_name, table_name)
        ):
            return _error(["Permission denied"], 403)
        if not db.is_mutable:
            return _error(["Database is immutable"], 403)
        # Best-effort parse of the body: a missing, invalid or non-object
        # JSON body simply means "not confirmed". The isinstance() guard
        # prevents an AttributeError when valid JSON that is not a
        # dictionary (e.g. a list) is POSTed.
        confirm = False
        try:
            data = json.loads(await request.post_body())
            if isinstance(data, dict):
                confirm = data.get("confirm")
        except json.JSONDecodeError:
            pass

        if not confirm:
            return Response.json(
                {
                    "ok": True,
                    "database": database_name,
                    "table": table_name,
                    # Table name is interpolated into the SQL wrapped in [];
                    # it comes from the resolved route, but this quoting does
                    # not cover names containing "]"
                    "row_count": (
                        await db.execute("select count(*) from [{}]".format(table_name))
                    ).single_value(),
                    "message": 'Pass "confirm": true to confirm',
                },
                status=200,
            )

        # Drop table - runs on the database's write thread
        def drop_table(conn):
            sqlite_utils.Database(conn)[table_name].drop()

        await db.execute_write_fn(drop_table)
        return Response.json({"ok": True}, status=200)
|