from django.db import migrations

# Installs a PL/pgSQL helper used by workspace search to filter out rows that
# have been trashed (or whose table no longer exists) after search data was
# indexed. The table name is resolved dynamically from the table id because
# Baserow stores each user table as a separate physical table
# (`database_table_<id>`).
forward_sql = """
CREATE OR REPLACE FUNCTION row_exists_not_trashed(p_table_id integer, p_row_id integer)
RETURNS boolean AS $$
DECLARE
    table_name text;
    result boolean;
BEGIN
    table_name := 'database_table_' || p_table_id;

    -- Guard against tables that have been dropped (e.g. permanently deleted
    -- tables whose search rows were not cleaned up yet). Accept both plain
    -- ('r') and partitioned ('p') relations so partitioned user tables are
    -- not silently treated as missing.
    IF NOT EXISTS (
        SELECT 1 FROM pg_class WHERE relname = table_name AND relkind IN ('r', 'p')
    ) THEN
        RETURN false;
    END IF;

    -- %I applies proper identifier quoting; the row id is passed as a bind
    -- parameter via USING, so no SQL injection is possible here.
    EXECUTE format(
        'SELECT EXISTS(SELECT 1 FROM %I WHERE id = $1 AND trashed = false)',
        table_name
    ) INTO result USING p_row_id;

    RETURN COALESCE(result, false);
END;
-- STABLE: the function only reads database state and returns the same result
-- for the same arguments within a single statement, which lets the planner
-- avoid treating it as VOLATILE when it is evaluated per candidate row.
$$ LANGUAGE plpgsql STABLE;
"""

reverse_sql = """
DROP FUNCTION IF EXISTS row_exists_not_trashed(integer, integer);
"""


class Migration(migrations.Migration):
    dependencies = [
        ("database", "0203_alter_field_field_dependencies"),
    ]

    operations = [
        # reverse_sql makes the migration cleanly reversible.
        migrations.RunSQL(forward_sql, reverse_sql),
    ]
import specific_iterator @@ -352,9 +353,14 @@ def get_union_values_queryset(self, user, workspace, context) -> QuerySet: self.type, getattr(self, "priority", 10) ) + field_ids_by_table_id = defaultdict(list) + for f_id, t_id in base_fields: + field_ids_by_table_id[t_id].append(f_id) + # Build field_id -> table_id mapping for CASE expression when_clauses = [ - When(field_id=f_id, then=Value(t_id)) for (f_id, t_id) in base_fields + When(field_id__in=f_ids, then=Value(t_id)) + for (t_id, f_ids) in field_ids_by_table_id.items() ] table_id_case = Case( *when_clauses, default=Value(0), output_field=IntegerField() @@ -377,6 +383,7 @@ def get_union_values_queryset(self, user, workspace, context) -> QuerySet: .filter(rn=1) # Only keep the best field per row .annotate( search_type=Value(self.type, output_field=TextField()), + is_valid=RowNotTrashedDynamicTable(F("table_id"), F("row_id")), object_id=Concat( Cast(F("table_id"), output_field=TextField()), Value("_", output_field=TextField()), @@ -398,6 +405,7 @@ def get_union_values_queryset(self, user, workspace, context) -> QuerySet: query=Value(context.query), ), ) + .filter(is_valid=True) .values( "search_type", "object_id", @@ -547,6 +555,7 @@ def postprocess(self, rows: Iterable[Dict]) -> List[SearchResult]: field_id_int = int(field_id) table_id_int = field_id_to_table_id.get(field_id_int) or int(table_id) + database_id = table_id_to_database_id.get(table_id_int) database_name = database_id_to_name.get(database_id) workspace_id = database_id_to_workspace_id.get(database_id) diff --git a/backend/src/baserow/contrib/database/table/expressions.py b/backend/src/baserow/contrib/database/table/expressions.py index 294c64df96..8661efadb7 100644 --- a/backend/src/baserow/contrib/database/table/expressions.py +++ b/backend/src/baserow/contrib/database/table/expressions.py @@ -1,5 +1,5 @@ from django.contrib.postgres.fields import ArrayField -from django.db.models import Func, IntegerField, TextField +from django.db.models import 
class RowNotTrashedDynamicTable(Func):
    """
    Check if a row exists and is not trashed in a dynamically named table.

    Calls the `row_exists_not_trashed(table_id, row_id)` PL/pgSQL function,
    which resolves the physical `database_table_<id>` table at execution time
    and returns false when the table is missing, the row is missing, or the
    row is trashed.
    """

    function = "row_exists_not_trashed"
    output_field = BooleanField()
    # The SQL function takes exactly (table_id, row_id). Declaring arity — as
    # the sibling expressions in this module do (e.g. BaserowTableFileUniques
    # with arity = 1) — makes Django reject a wrong argument count at
    # expression-construction time instead of failing at SQL execution.
    arity = 2
SearchHandler.create_workspace_search_table_if_not_exists(workspace.id) + SearchHandler.initialize_missing_search_data(table) + SearchHandler.process_search_data_updates(table) + + model = table.get_model() + model.objects_and_trash.filter(id=row2.id).update(trashed=True) + + context = SearchContext(query="Searchable", limit=10, offset=0) + results, _ = WorkspaceSearchHandler().search_all_types(user, workspace, context) + + row_results = [r for r in results if r.type == "database_row"] + assert len(row_results) == 1 + assert row_results[0].id == f"{table.id}_{row1.id}" + assert "normal" in row_results[0].title.lower() diff --git a/changelog/entries/unreleased/bug/4525_do_not_show_trashed_rows_in_search_results.json b/changelog/entries/unreleased/bug/4525_do_not_show_trashed_rows_in_search_results.json new file mode 100644 index 0000000000..37f63bdbe9 --- /dev/null +++ b/changelog/entries/unreleased/bug/4525_do_not_show_trashed_rows_in_search_results.json @@ -0,0 +1,9 @@ +{ + "type": "bug", + "message": "Do not show trashed rows in search results", + "issue_origin": "github", + "issue_number": 4525, + "domain": "database", + "bullet_points": [], + "created_at": "2026-01-09" +} \ No newline at end of file