zf

zenflows testing
git clone https://s.sonu.ch/~srfsh/zf.git

commit f5ca1be4938a3ceee04283d1bce0f221c8181c40
parent 23a9fc500ec008a9287f1ba1387a5629cfd8fae7
Author: srfsh <dev@srf.sh>
Date:   Thu, 20 Oct 2022 20:15:24 +0300

dep: update ecto_sql v3.9.0
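
The bundled update brings Ecto v3.9.1, ecto_sql v3.9.0 and a matching postgrex
bump. The headline feature is select aliases: selected_as/2 names a selected
expression and selected_as/1 references that alias elsewhere in the query. A
minimal sketch, adapted from the integration tests in this diff (the Post
schema and its fields are illustrative, not part of zenflows):

    import Ecto.Query

    # Alias the selected expression as :date, then reuse the alias in
    # group_by instead of repeating the expression.
    query =
      from p in Post,
        select: %{
          posted: selected_as(p.posted, :date),
          min_visits: p.visits |> coalesce(0) |> min()
        },
        group_by: selected_as(:date)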

Diffstat:
M .deps/ecto/.hex | 0
M .deps/ecto/CHANGELOG.md | 42 ++++++++++++++++++++++++++++++++++++++++++
M .deps/ecto/README.md | 2 +-
M .deps/ecto/hex_metadata.config | 2 +-
M .deps/ecto/integration_test/cases/preload.exs | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
M .deps/ecto/integration_test/cases/repo.exs | 139 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----
M .deps/ecto/integration_test/cases/type.exs | 66 ++++++++++++++++++++++++++++++++++++++++--------------------------
M .deps/ecto/integration_test/support/schemas.exs | 37 ++++++++++++++++++++++++++++++++++++-
M .deps/ecto/lib/ecto.ex | 2 +-
M .deps/ecto/lib/ecto/adapter/queryable.ex | 7 ++++---
M .deps/ecto/lib/ecto/association.ex | 23 +++++++++++++----------
M .deps/ecto/lib/ecto/changeset.ex | 157 +++++++++++++++++++++++++++++++++++++++++++++++++++++--------------------------
M .deps/ecto/lib/ecto/changeset/relation.ex | 7 ++++++-
M .deps/ecto/lib/ecto/embedded.ex | 4 ++++
M .deps/ecto/lib/ecto/enum.ex | 50 +++++++++++++++++++++++++++++++++++++++++++++++---
M .deps/ecto/lib/ecto/multi.ex | 13 +++++++------
M .deps/ecto/lib/ecto/parameterized_type.ex | 2 +-
M .deps/ecto/lib/ecto/query.ex | 323 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-----------
M .deps/ecto/lib/ecto/query/api.ex | 49 +++++++++++++++++++++++++++++++++++++++++++++++--
M .deps/ecto/lib/ecto/query/builder.ex | 88 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------
M .deps/ecto/lib/ecto/query/builder/cte.ex | 27 ++++++++++++++++++++++-----
M .deps/ecto/lib/ecto/query/builder/distinct.ex | 10 +++++-----
M .deps/ecto/lib/ecto/query/builder/dynamic.ex | 55 ++++++++++++++++++++++++++++++++++++++++++-------------
M .deps/ecto/lib/ecto/query/builder/filter.ex | 24 ++++++++++++------------
M .deps/ecto/lib/ecto/query/builder/from.ex | 32 +++++++++++++++++++++++---------
M .deps/ecto/lib/ecto/query/builder/group_by.ex | 6 +++---
M .deps/ecto/lib/ecto/query/builder/join.ex | 10 +++++-----
M .deps/ecto/lib/ecto/query/builder/limit_offset.ex | 2 +-
M .deps/ecto/lib/ecto/query/builder/lock.ex | 2 +-
M .deps/ecto/lib/ecto/query/builder/order_by.ex | 6 +++---
M .deps/ecto/lib/ecto/query/builder/select.ex | 252 ++++++++++++++++++++++++++++++++++++++++++++++++++++++---------------------
M .deps/ecto/lib/ecto/query/builder/update.ex | 2 +-
M .deps/ecto/lib/ecto/query/builder/windows.ex | 6 +++---
M .deps/ecto/lib/ecto/query/inspect.ex | 35 ++++++++++++++---------------------
M .deps/ecto/lib/ecto/query/planner.ex | 254 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------------------
M .deps/ecto/lib/ecto/repo.ex | 99 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------
M .deps/ecto/lib/ecto/repo/preloader.ex | 85 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------------
M .deps/ecto/lib/ecto/repo/queryable.ex | 20 ++++++++++++++------
M .deps/ecto/lib/ecto/repo/schema.ex | 176 ++++++++++++++++++++++++++++++++++++++++++-------------------------------------
M .deps/ecto/lib/ecto/repo/supervisor.ex | 2 +-
M .deps/ecto/lib/ecto/schema.ex | 14 ++++++++++++--
M .deps/ecto/mix.exs | 2 +-
M .deps/ecto_sql/.hex | 0
M .deps/ecto_sql/CHANGELOG.md | 14 +++++++++++++-
M .deps/ecto_sql/hex_metadata.config | 11 +++++------
M .deps/ecto_sql/integration_test/sql/logging.exs | 558 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
M .deps/ecto_sql/integration_test/sql/migrator.exs | 21 +++++++++++++++++++++
M .deps/ecto_sql/integration_test/support/migration.exs | 12 ++++++++++++
M .deps/ecto_sql/lib/ecto/adapter/structure.ex | 21 +++++++++++++++++++++
D .deps/ecto_sql/lib/ecto/adapters/mysql.ex | 23 -----------------------
M .deps/ecto_sql/lib/ecto/adapters/myxql.ex | 30 ++++++++++++++++++++++--------
M .deps/ecto_sql/lib/ecto/adapters/myxql/connection.ex | 13 ++++++++++++-
M .deps/ecto_sql/lib/ecto/adapters/postgres.ex | 124 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------
M .deps/ecto_sql/lib/ecto/adapters/postgres/connection.ex | 17 ++++++++++++++++-
M .deps/ecto_sql/lib/ecto/adapters/sql.ex | 50 +++++++++-----------------------------------------
M .deps/ecto_sql/lib/ecto/adapters/sql/sandbox.ex | 2 +-
M .deps/ecto_sql/lib/ecto/adapters/tds.ex | 2 +-
M .deps/ecto_sql/lib/ecto/adapters/tds/connection.ex | 19 ++++++++++++++-----
M .deps/ecto_sql/lib/ecto/migration.ex | 198 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------------------
M .deps/ecto_sql/lib/ecto/migration/runner.ex | 90 +++++++++++++++++++++++++++++++++++++++++++++++++++++---------------------------
M .deps/ecto_sql/lib/ecto/migration/schema_migration.ex | 7 ++++++-
M .deps/ecto_sql/lib/ecto/migrator.ex | 24 ------------------------
M .deps/ecto_sql/mix.exs | 6 +++---
M .deps/postgrex/.hex | 0
M .deps/postgrex/CHANGELOG.md | 13 +++++++++++++
M .deps/postgrex/hex_metadata.config | 2 +-
M .deps/postgrex/lib/postgrex.ex | 38 +++++++++++++++++++++++++++++-----
M .deps/postgrex/lib/postgrex/extensions/array.ex | 14 +++++++-------
M .deps/postgrex/lib/postgrex/extensions/bit_string.ex | 10 +++++-----
M .deps/postgrex/lib/postgrex/extensions/bool.ex | 8 ++++----
M .deps/postgrex/lib/postgrex/extensions/box.ex | 4 ++--
M .deps/postgrex/lib/postgrex/extensions/circle.ex | 4 ++--
M .deps/postgrex/lib/postgrex/extensions/date.ex | 4 ++--
M .deps/postgrex/lib/postgrex/extensions/float4.ex | 16 ++++++++--------
M .deps/postgrex/lib/postgrex/extensions/float8.ex | 16 ++++++++--------
M .deps/postgrex/lib/postgrex/extensions/hstore.ex | 22 +++++++++++-----------
M .deps/postgrex/lib/postgrex/extensions/inet.ex | 12 ++++++------
M .deps/postgrex/lib/postgrex/extensions/int2.ex | 4 ++--
M .deps/postgrex/lib/postgrex/extensions/int4.ex | 4 ++--
M .deps/postgrex/lib/postgrex/extensions/int8.ex | 4 ++--
M .deps/postgrex/lib/postgrex/extensions/interval.ex | 4 ++--
M .deps/postgrex/lib/postgrex/extensions/json.ex | 6 +++---
M .deps/postgrex/lib/postgrex/extensions/jsonb.ex | 6 +++---
M .deps/postgrex/lib/postgrex/extensions/line.ex | 4 ++--
M .deps/postgrex/lib/postgrex/extensions/line_segment.ex | 4 ++--
M .deps/postgrex/lib/postgrex/extensions/macaddr.ex | 4 ++--
M .deps/postgrex/lib/postgrex/extensions/name.ex | 6 +++---
M .deps/postgrex/lib/postgrex/extensions/numeric.ex | 38 +++++++++++++++++++++++++++-----------
M .deps/postgrex/lib/postgrex/extensions/oid.ex | 4 ++--
M .deps/postgrex/lib/postgrex/extensions/path.ex | 8 ++++----
M .deps/postgrex/lib/postgrex/extensions/point.ex | 6 +++---
M .deps/postgrex/lib/postgrex/extensions/polygon.ex | 8 ++++----
M .deps/postgrex/lib/postgrex/extensions/range.ex | 6 +++---
M .deps/postgrex/lib/postgrex/extensions/raw.ex | 6 +++---
M .deps/postgrex/lib/postgrex/extensions/record.ex | 10 +++++-----
M .deps/postgrex/lib/postgrex/extensions/tid.ex | 4 ++--
M .deps/postgrex/lib/postgrex/extensions/time.ex | 4 ++--
M .deps/postgrex/lib/postgrex/extensions/timestamp.ex | 4 ++--
M .deps/postgrex/lib/postgrex/extensions/timestamptz.ex | 4 ++--
M .deps/postgrex/lib/postgrex/extensions/timetz.ex | 4 ++--
M .deps/postgrex/lib/postgrex/extensions/tsvector.ex | 8 ++++----
M .deps/postgrex/lib/postgrex/extensions/uuid.ex | 6 +++---
M .deps/postgrex/lib/postgrex/extensions/void_binary.ex | 4 ++--
M .deps/postgrex/lib/postgrex/extensions/void_text.ex | 4 ++--
M .deps/postgrex/lib/postgrex/extensions/xid8.ex | 4 ++--
M .deps/postgrex/lib/postgrex/messages.ex | 56 ++++++++++++++++++++++++++++----------------------------
M .deps/postgrex/lib/postgrex/protocol.ex | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----
M .deps/postgrex/lib/postgrex/replication_connection.ex | 2 +-
M .deps/postgrex/lib/postgrex/result.ex | 4 ++--
M .deps/postgrex/lib/postgrex/simple_connection.ex | 2 +-
M .deps/postgrex/lib/postgrex/type_module.ex | 30 +++++++++++++++---------------
M .deps/postgrex/lib/postgrex/types.ex | 4 ++--
M .deps/postgrex/lib/postgrex/utils.ex | 9 ++++++++-
M .deps/postgrex/mix.exs | 2 +-
M mix.exs | 2 +-
M mix.lock | 6 +++---
116 files changed, 3016 insertions(+), 949 deletions(-)

diff --git a/.deps/ecto/.hex b/.deps/ecto/.hex
Binary files differ.
diff --git a/.deps/ecto/CHANGELOG.md b/.deps/ecto/CHANGELOG.md
@@ -1,5 +1,47 @@
 # Changelog for v3.x

+## v3.9.1 (2022-10-06)
+
+### Enhancements
+
+  * [Ecto.Query] Allow `selected_as` at the root of `dynamic/2`
+  * [Ecto.Query] Allow `selected_as` to be used with `type/2`
+  * [Ecto.Query] Allow `selected_as` to be used with `select_merge`
+
+### Bug fixes
+
+  * [Ecto.Changeset] Reenable support for embedded schemas in `unsafe_validate_unique/4`
+  * [Ecto.Query] Ensure `join_where` conditions preload correctly in `many_to_many` or with queries with one or many joins
+
+## v3.9.0 (2022-09-27)
+
+### Enhancements
+
+  * [Ecto.Changeset] Add `:force_changes` option to `cast/4`
+  * [Ecto.Enum] Allow enum fields to be embed either as their values or their dumped versions
+  * [Ecto.Query] Support `^%{field: dynamic(...)}` in `select` and `select_merge`
+  * [Ecto.Query] Support `%{field: subquery(...)}` in `select` and `select_merge`
+  * [Ecto.Query] Support select aliases through `selected_as/1` and `selected_as/2`
+  * [Ecto.Query] Allow `parent_as/1` in `type/2`
+  * [Ecto.Query] Add `with_named_binding/3`
+  * [Ecto.Query] Allow fragment sources in keyword queries
+  * [Ecto.Repo] Support `idle_interval` query parameter in connection URL
+  * [Ecto.Repo] Log human-readable UUIDs by using pre-dumped query parameters
+  * [Ecto.Schema] Support preloading associations in embedded schemas
+
+### Bug fix
+
+  * [Ecto.Changeset] Raise when schemaless changeset or embedded schema is used in `unsafe_validate_unique/4`
+  * [Ecto.Query] Respect virtual field type in subqueries
+  * [Ecto.Query] Don't select struct fields overridden with `nil`
+  * [Ecto.Query] Fix `select_merge` not tracking `load_in_query: false` field
+  * [Ecto.Query] Fix field source when used in `json_extract_path`
+  * [Ecto.Query] Properly build CTEs at compile time
+  * [Ecto.Query] Properly order subqueries in `dynamic`
+  * [Ecto.Repo] Fix `insert_all` query parameter count when using value queries alongside `placeholder`
+  * [Ecto.Repo] Raise if combination query is used in a `many` preload
+  * [Ecto.Schema] Ignore associations that aren't loaded on insert
+
 ## v3.8.4 (2022-06-04)

 ### Enhancements

diff --git a/.deps/ecto/README.md b/.deps/ecto/README.md
@@ -72,7 +72,7 @@ defmodule Sample.App do
 end
 ```

-Ecto is commonly used to interact with databases, such as Postgres and MySQL via [Ecto.Adapters.SQL](https://hexdocs.pm/ecto_sql) ([source code](https://github.com/elixir-ecto/ecto_sql)). Ecto is also commonly used to map data from any source into Elixir structs, whether they are backed by a database or not.
+Ecto is commonly used to interact with databases, such as PostgreSQL and MySQL via [Ecto.Adapters.SQL](https://hexdocs.pm/ecto_sql) ([source code](https://github.com/elixir-ecto/ecto_sql)). Ecto is also commonly used to map data from any source into Elixir structs, whether they are backed by a database or not.

 See the [getting started guide](https://hexdocs.pm/ecto/getting-started.html) and the [online documentation](https://hexdocs.pm/ecto) for more information. Other resources available are:

diff --git a/.deps/ecto/hex_metadata.config b/.deps/ecto/hex_metadata.config
@@ -68,4 +68,4 @@
    {<<"optional">>,true},
    {<<"repository">>,<<"hexpm">>},
    {<<"requirement">>,<<"~> 1.0">>}]]}.
-{<<"version">>,<<"3.8.4">>}.
+{<<"version">>,<<"3.9.1">>}.
diff --git a/.deps/ecto/integration_test/cases/preload.exs b/.deps/ecto/integration_test/cases/preload.exs
@@ -10,6 +10,7 @@ defmodule Ecto.Integration.PreloadTest do
   alias Ecto.Integration.Permalink
   alias Ecto.Integration.User
   alias Ecto.Integration.Custom
+  alias Ecto.Integration.Order

   test "preload with parameter from select_merge" do
     p1 = TestRepo.insert!(%Post{title: "p1"})
@@ -708,6 +709,65 @@ defmodule Ecto.Integration.PreloadTest do
     assert %User{id: ^uid1} = item.user
   end

+  describe "preload associations from nested embeds" do
+    setup do
+      %User{id: uid1} = TestRepo.insert!(%User{name: "1"})
+      %User{id: uid2} = TestRepo.insert!(%User{name: "2"})
+      %User{id: uid3} = TestRepo.insert!(%User{name: "3"})
+      item1 = %Item{id: 1, user_id: uid1}
+      item2 = %Item{id: 2, user_id: uid2}
+      item3 = %Item{id: 3, user_id: uid3}
+      order1 = %Order{items: [item1, item3, item2], item: item1}
+      order2 = %Order{items: [], item: nil}
+      order3 = %Order{items: nil, item: nil}
+      order4 = %Order{items: [item1, item2], item: item2}
+
+      [orders: [order1, order2, order3, order4]]
+    end
+
+    test "cannot preload embed without its associations", context do
+      assert_raise ArgumentError, ~r/cannot preload embedded field/, fn ->
+        TestRepo.preload(context.orders, :item)
+      end
+    end
+
+    test "embeds_one", context do
+      [nil | preloaded_orders] = [nil | context.orders] |> TestRepo.preload(item: :user)
+
+      expected_item_user =
+        Enum.map(context.orders, fn
+          %{item: nil} -> {nil, nil}
+          %{item: item} -> {item.id, item.user_id}
+        end)
+
+      actual_item_user =
+        Enum.map(preloaded_orders, fn
+          %{item: nil} -> {nil, nil}
+          %{item: item} -> {item.id, item.user.id}
+        end)
+
+      assert expected_item_user == actual_item_user
+    end
+
+    test "embeds_many", context do
+      [nil | preloaded_orders] = [nil | context.orders] |> TestRepo.preload(items: :user)
+
+      expected_items_user =
+        Enum.map(context.orders, fn
+          %{items: nil} -> {nil, nil}
+          %{items: items} -> Enum.map(items, & {&1.id, &1.user_id})
+        end)
+
+      actual_items_user =
+        Enum.map(preloaded_orders, fn
+          %{items: nil} -> {nil, nil}
+          %{items: items} -> Enum.map(items, & {&1.id, &1.user.id})
+        end)
+
+      assert expected_items_user == actual_items_user
+    end
+  end
+
   defp sort_by_id(values) do
     Enum.sort_by(values, &(&1.id))
   end
diff --git a/.deps/ecto/integration_test/cases/repo.exs b/.deps/ecto/integration_test/cases/repo.exs
@@ -616,7 +616,7 @@ defmodule Ecto.Integration.RepoTest do
     |> Ecto.Changeset.assoc_constraint(:post)
   end

-  test "unsafe_validate_unique/3" do
+  test "unsafe_validate_unique/4" do
     {:ok, inserted_post} = TestRepo.insert(%Post{title: "Greetings", visits: 13})
     new_post_changeset = Post.changeset(%Post{}, %{title: "Greetings", visits: 17})
@@ -632,7 +632,7 @@ defmodule Ecto.Integration.RepoTest do
     assert changeset.errors[:title] == nil # cannot conflict with itself
   end

-  test "unsafe_validate_unique/3 with composite keys" do
+  test "unsafe_validate_unique/4 with composite keys" do
     {:ok, inserted_post} = TestRepo.insert(%CompositePk{a: 123, b: 456, name: "UniqueName"})
     different_pk = CompositePk.changeset(%CompositePk{}, %{name: "UniqueName", a: 789, b: 321})
@@ -1075,21 +1075,25 @@ defmodule Ecto.Integration.RepoTest do
   test "Repo.insert_all upserts and fills in placeholders with conditioned on_conflict query" do
     do_not_update_title = "don't touch me"

+    posted_value =
+      from p in Post, where: p.public == ^true and p.id > ^0, select: p.posted, limit: 1
+
     on_conflict =
       from p in Post,
        update: [set: [title: "updated"]],
        where: p.title != ^do_not_update_title

-    placeholders = %{posted: Date.utc_today(), title: "title"}
+    placeholders = %{visits: 1, title: "title"}

     post1 = [
+      visits: {:placeholder, :visits},
       title: {:placeholder, :title},
       uuid: Ecto.UUID.generate(),
-      posted: {:placeholder, :posted}
+      posted: posted_value
     ]

     post2 = [
       title: do_not_update_title,
       uuid: Ecto.UUID.generate(),
-      posted: {:placeholder, :posted}
+      posted: posted_value
     ]

     assert TestRepo.insert_all(Post, [post1, post2],
@@ -1533,6 +1537,111 @@ defmodule Ecto.Integration.RepoTest do
       assert [%Post{title: "1", counter: 2}] = TestRepo.all(subquery)
     end
+
+    @tag :selected_as_with_group_by
+    test "selected_as/2 with group_by" do
+      TestRepo.insert!(%Post{posted: ~D[2020-12-21], visits: 3})
+      TestRepo.insert!(%Post{posted: ~D[2020-12-21], visits: 2})
+      TestRepo.insert!(%Post{posted: ~D[2020-12-20], visits: nil})
+
+      query =
+        from p in Post,
+          select: %{
+            posted: selected_as(p.posted, :date),
+            min_visits: p.visits |> coalesce(0) |> min()
+          },
+          group_by: selected_as(:date),
+          order_by: p.posted
+
+      assert [%{posted: ~D[2020-12-20], min_visits: 0}, %{posted: ~D[2020-12-21], min_visits: 2}] =
+               TestRepo.all(query)
+    end
+
+    @tag :selected_as_with_order_by
+    test "selected_as/2 with order_by" do
+      TestRepo.insert!(%Post{posted: ~D[2020-12-21], visits: 3})
+      TestRepo.insert!(%Post{posted: ~D[2020-12-21], visits: 2})
+      TestRepo.insert!(%Post{posted: ~D[2020-12-20], visits: nil})
+
+      base_query =
+        from p in Post,
+          select: %{
+            posted: p.posted,
+            min_visits: p.visits |> coalesce(0) |> min() |> selected_as(:min_visits)
+          },
+          group_by: p.posted
+
+      # ascending order
+      results = base_query |> order_by(selected_as(:min_visits)) |> TestRepo.all()
+
+      assert [%{posted: ~D[2020-12-20], min_visits: 0}, %{posted: ~D[2020-12-21], min_visits: 2}] =
+               results
+
+      # descending order
+      results = base_query |> order_by([desc: selected_as(:min_visits)]) |> TestRepo.all()
+
+      assert [%{posted: ~D[2020-12-21], min_visits: 2}, %{posted: ~D[2020-12-20], min_visits: 0}] =
+               results
+    end
+
+    @tag :selected_as_with_order_by
+    test "selected_as/2 respects custom types" do
+      TestRepo.insert!(%Post{title: "title1", visits: 1})
+      TestRepo.insert!(%Post{title: "title2"})
+      uuid = Ecto.UUID.generate()
+
+      query =
+        from p in Post,
+          select: %{
+            uuid: type(^uuid, Ecto.UUID) |> selected_as(:uuid),
+            visits: p.visits |> coalesce(0) |> selected_as(:visits)
+          },
+          order_by: [selected_as(:uuid), selected_as(:visits)]
+
+      assert [%{uuid: ^uuid, visits: 0}, %{uuid: ^uuid, visits: 1}] = TestRepo.all(query)
+    end
+
+    @tag :selected_as_with_order_by_expression
+    test "selected_as/2 with order_by expression" do
+      TestRepo.insert!(%Post{posted: ~D[2020-12-21], visits: 3, intensity: 2.0})
+      TestRepo.insert!(%Post{posted: ~D[2020-12-20], visits: nil, intensity: 10.0})
+
+      results =
+        from(p in Post,
+          select: %{
+            posted: p.posted,
+            visits: p.visits |> coalesce(0) |> selected_as(:num_visits),
+            intensity: selected_as(p.intensity, :strength)
+          },
+          order_by: [desc: (selected_as(:num_visits) + selected_as(:strength))]
+        )
+        |> TestRepo.all()
+
+      assert [%{posted: ~D[2020-12-20], visits: 0}, %{posted: ~D[2020-12-21], visits: 3}] =
+               results
+    end
+
+    @tag :selected_as_with_having
+    test "selected_as/2 with having" do
+      TestRepo.insert!(%Post{posted: ~D[2020-12-21], visits: 3})
+      TestRepo.insert!(%Post{posted: ~D[2020-12-21], visits: 2})
+      TestRepo.insert!(%Post{posted: ~D[2020-12-20], visits: nil})
+
+      results =
+        from(p in Post,
+          select: %{
+            posted: p.posted,
+            min_visits: p.visits |> coalesce(0) |> min() |> selected_as(:min_visits)
+          },
+          group_by: p.posted,
+          having: selected_as(:min_visits) > 0,
+          or_having: not(selected_as(:min_visits) > 0),
+          order_by: p.posted
+        )
+        |> TestRepo.all()
+
+      assert [%{posted: ~D[2020-12-20], min_visits: 0}, %{posted: ~D[2020-12-21], min_visits: 2}] = results
+    end
   end

   test "query count distinct" do
@@ -1836,7 +1945,7 @@ defmodule Ecto.Integration.RepoTest do
   @tag :with_conflict_target
   test "on conflict query and conflict target" do
-    on_conflict = from Post, update: [set: [title: "second"]]
+    on_conflict = from p in Post, where: p.id > ^0, update: [set: [title: "second"]]
     post = [title: "first", uuid: Ecto.UUID.generate()]
     assert TestRepo.insert_all(Post, [post], on_conflict: on_conflict, conflict_target: [:uuid]) == {1, nil}
@@ -1850,6 +1959,24 @@ defmodule Ecto.Integration.RepoTest do
     assert TestRepo.all(from p in Post, select: p.title) == ["second"]
   end

+  @tag :insert_select
+  @tag :with_conflict_target
+  test "on conflict query and insert select and conflict target" do
+    on_conflict = from p in Post, where: p.id > ^0, update: [set: [title: "second"]]
+    visits_value = from p in Post, where: p.public == ^true and p.id > ^0, select: p.visits, limit: 1
+    post = [title: "first", uuid: Ecto.UUID.generate(), visits: visits_value]
+    assert TestRepo.insert_all(Post, [post], on_conflict: on_conflict, conflict_target: [:uuid]) ==
+             {1, nil}
+
+    # Error on non-conflict target
+    assert catch_error(TestRepo.insert_all(Post, [post], on_conflict: on_conflict, conflict_target: [:id]))
+
+    # Error on conflict target
+    assert TestRepo.insert_all(Post, [post], on_conflict: on_conflict, conflict_target: [:uuid]) ==
+             {1, nil}
+    assert TestRepo.all(from p in Post, select: p.title) == ["second"]
+  end
+
   @tag :returning
   @tag :with_conflict_target
   test "on conflict query and conflict target and returning" do
diff --git a/.deps/ecto/integration_test/cases/type.exs b/.deps/ecto/integration_test/cases/type.exs
@@ -1,7 +1,7 @@
 defmodule Ecto.Integration.TypeTest do
   use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true)

-  alias Ecto.Integration.{Custom, Item, ItemColor, Order, Post, User, Tag, Usec}
+  alias Ecto.Integration.{Comment, Custom, Item, ItemColor, Order, Post, User, Tag, Usec}
   alias Ecto.Integration.TestRepo
   import Ecto.Query
@@ -114,7 +114,8 @@ defmodule Ecto.Integration.TypeTest do
   end

   test "tagged types" do
-    TestRepo.insert!(%Post{visits: 12})
+    %{id: post_id} = TestRepo.insert!(%Post{visits: 12})
+    TestRepo.insert!(%Comment{text: "#{post_id}", post_id: post_id})

     # Numbers
     assert [1] = TestRepo.all(from Post, select: type(^"1", :integer))
@@ -141,6 +142,11 @@ defmodule Ecto.Integration.TypeTest do
     # Comparison expression
     assert [12] = TestRepo.all(from p in Post, select: type(coalesce(p.visits, 0), :integer))
     assert [1.0] = TestRepo.all(from p in Post, select: type(coalesce(p.intensity, 1.0), :float))
+
+    # parent_as/1
+    child = from c in Comment, where: type(parent_as(:posts).id, :string) == c.text, select: c.post_id
+    query = from p in Post, as: :posts, where: p.id in subquery(child), select: p.id
+    assert [post_id] == TestRepo.all(query)
   end

   test "binary id type" do
@@ -294,7 +300,7 @@ defmodule Ecto.Integration.TypeTest do
   @tag :map_type
   @tag :json_extract_path
   test "json_extract_path with primitive values" do
-    order = %Order{meta:
+    order = %Order{metadata:
      %{
       :id => 123,
       :time => ~T[09:00:00],
@@ -308,44 +314,44 @@ defmodule Ecto.Integration.TypeTest do

     order = TestRepo.insert!(order)

-    assert TestRepo.one(from o in Order, select: o.meta["id"]) == 123
-    assert TestRepo.one(from o in Order, select: o.meta["bad"]) == nil
-    assert TestRepo.one(from o in Order, select: o.meta["bad"]["bad"]) == nil
+    assert TestRepo.one(from o in Order, select: o.metadata["id"]) == 123
+    assert TestRepo.one(from o in Order, select: o.metadata["bad"]) == nil
+    assert TestRepo.one(from o in Order, select: o.metadata["bad"]["bad"]) == nil
     field = "id"
-    assert TestRepo.one(from o in Order, select: o.meta[^field]) == 123
-    assert TestRepo.one(from o in Order, select: o.meta["time"]) == "09:00:00"
-    assert TestRepo.one(from o in Order, select: o.meta["'single quoted'"]) == "bar"
-    assert TestRepo.one(from o in Order, select: o.meta["';"]) == nil
-    assert TestRepo.one(from o in Order, select: o.meta["\"double quoted\""]) == "baz"
-    assert TestRepo.one(from o in Order, select: o.meta["enabled"]) == true
-    assert TestRepo.one(from o in Order, select: o.meta["extra"][0]["enabled"]) == false
+    assert TestRepo.one(from o in Order, select: o.metadata[^field]) == 123
+    assert TestRepo.one(from o in Order, select: o.metadata["time"]) == "09:00:00"
+    assert TestRepo.one(from o in Order, select: o.metadata["'single quoted'"]) == "bar"
+    assert TestRepo.one(from o in Order, select: o.metadata["';"]) == nil
+    assert TestRepo.one(from o in Order, select: o.metadata["\"double quoted\""]) == "baz"
+    assert TestRepo.one(from o in Order, select: o.metadata["enabled"]) == true
+    assert TestRepo.one(from o in Order, select: o.metadata["extra"][0]["enabled"]) == false

     # where
-    assert TestRepo.one(from o in Order, where: o.meta["id"] == 123, select: o.id) == order.id
-    assert TestRepo.one(from o in Order, where: o.meta["id"] == 456, select: o.id) == nil
-    assert TestRepo.one(from o in Order, where: o.meta["code"] == "good", select: o.id) == order.id
-    assert TestRepo.one(from o in Order, where: o.meta["code"] == "bad", select: o.id) == nil
-    assert TestRepo.one(from o in Order, where: o.meta["enabled"] == true, select: o.id) == order.id
-    assert TestRepo.one(from o in Order, where: o.meta["extra"][0]["enabled"] == false, select: o.id) == order.id
+    assert TestRepo.one(from o in Order, where: o.metadata["id"] == 123, select: o.id) == order.id
+    assert TestRepo.one(from o in Order, where: o.metadata["id"] == 456, select: o.id) == nil
+    assert TestRepo.one(from o in Order, where: o.metadata["code"] == "good", select: o.id) == order.id
+    assert TestRepo.one(from o in Order, where: o.metadata["code"] == "bad", select: o.id) == nil
+    assert TestRepo.one(from o in Order, where: o.metadata["enabled"] == true, select: o.id) == order.id
+    assert TestRepo.one(from o in Order, where: o.metadata["extra"][0]["enabled"] == false, select: o.id) == order.id
   end

   @tag :map_type
   @tag :json_extract_path
   test "json_extract_path with arrays and objects" do
-    order = %Order{meta: %{tags: [%{name: "red"}, %{name: "green"}]}}
+    order = %Order{metadata: %{tags: [%{name: "red"}, %{name: "green"}]}}
     order = TestRepo.insert!(order)

-    assert TestRepo.one(from o in Order, select: o.meta["tags"][0]["name"]) == "red"
-    assert TestRepo.one(from o in Order, select: o.meta["tags"][99]["name"]) == nil
+    assert TestRepo.one(from o in Order, select: o.metadata["tags"][0]["name"]) == "red"
+    assert TestRepo.one(from o in Order, select: o.metadata["tags"][99]["name"]) == nil
     index = 1
-    assert TestRepo.one(from o in Order, select: o.meta["tags"][^index]["name"]) == "green"
+    assert TestRepo.one(from o in Order, select: o.metadata["tags"][^index]["name"]) == "green"

     # where
-    assert TestRepo.one(from o in Order, where: o.meta["tags"][0]["name"] == "red", select: o.id) == order.id
-    assert TestRepo.one(from o in Order, where: o.meta["tags"][0]["name"] == "blue", select: o.id) == nil
-    assert TestRepo.one(from o in Order, where: o.meta["tags"][99]["name"] == "red", select: o.id) == nil
+    assert TestRepo.one(from o in Order, where: o.metadata["tags"][0]["name"] == "red", select: o.id) == order.id
+    assert TestRepo.one(from o in Order, where: o.metadata["tags"][0]["name"] == "blue", select: o.id) == nil
+    assert TestRepo.one(from o in Order, where: o.metadata["tags"][99]["name"] == "red", select: o.id) == nil
   end

   @tag :map_type
@@ -358,6 +364,14 @@ defmodule Ecto.Integration.TypeTest do
   end

   @tag :map_type
+  @tag :json_extract_path
+  test "json_extract_path with custom field source" do
+    order = TestRepo.insert!(%Order{metadata: %{tags: [%{name: "red"}, %{name: "green"}]}})
+
+    assert TestRepo.one(from o in Order, where: o.metadata["tags"][0]["name"] == "red", select: o.id) == order.id
+  end
+
+  @tag :map_type
   @tag :map_type_schemaless
   test "embeds one with custom type" do
     item = %Item{price: 123, reference: "PREFIX-EXAMPLE"}
diff --git a/.deps/ecto/integration_test/support/schemas.exs b/.deps/ecto/integration_test/support/schemas.exs
@@ -264,12 +264,15 @@ defmodule Ecto.Integration.Order do

     * Text columns
     * Embedding one schema
+    * Preloading items inside embeds_many
+    * Preloading items inside embeds_one
+    * Field source with json_extract_path

   """
   use Ecto.Integration.Schema

   schema "orders" do
-    field :meta, :map
+    field :metadata, :map, source: :meta
     embeds_one :item, Ecto.Integration.Item
     embeds_many :items, Ecto.Integration.Item
     belongs_to :permalink, Ecto.Integration.Permalink
@@ -343,3 +346,35 @@ defmodule Ecto.Integration.Usec do
     field :utc_datetime_usec, :utc_datetime_usec
   end
 end
+
+defmodule Ecto.Integration.Logging do
+  @moduledoc """
+  This module is used to test:
+
+    * Logging the casted version of parameters without array types
+
+  """
+  use Ecto.Integration.Schema
+
+  @primary_key {:bid, :binary_id, autogenerate: true}
+  schema "loggings" do
+    field :int, :integer
+    field :uuid, Ecto.Integration.TestRepo.uuid()
+    timestamps()
+  end
+end
+
+defmodule Ecto.Integration.ArrayLogging do
+  @moduledoc """
+  This module is used to test:
+
+    * Logging the casted version of parameters with array types
+
+  """
+  use Ecto.Integration.Schema
+
+  schema "array_loggings" do
+    field :uuids, {:array, Ecto.Integration.TestRepo.uuid()}
+    timestamps()
+  end
+end
diff --git a/.deps/ecto/lib/ecto.ex b/.deps/ecto/lib/ecto.ex
@@ -25,7 +25,7 @@ defmodule Ecto do
     * `Ecto.Changeset` - **how to change** the data

   Besides the four components above, most developers use Ecto to interact
-  with SQL databases, such as Postgres and MySQL via the
+  with SQL databases, such as PostgreSQL and MySQL via the
   [`ecto_sql`](https://hexdocs.pm/ecto_sql) project.
   `ecto_sql` provides many conveniences for working with SQL databases as well
   as the ability to version how your database changes through time via
diff --git a/.deps/ecto/lib/ecto/adapter/queryable.ex b/.deps/ecto/lib/ecto/adapter/queryable.ex
@@ -103,13 +103,13 @@ defmodule Ecto.Adapter.Queryable do
   def prepare_query(operation, repo_name_or_pid, queryable) do
     %{adapter: adapter, cache: cache} = Ecto.Repo.Registry.lookup(repo_name_or_pid)

-    {_meta, prepared, params} =
+    {_meta, prepared, _cast_params, dump_params} =
       queryable
       |> Ecto.Queryable.to_query()
       |> Ecto.Query.Planner.ensure_select(operation == :all)
       |> Ecto.Query.Planner.query(operation, cache, adapter, 0)

-    {prepared, params}
+    {prepared, dump_params}
   end

@@ -120,7 +120,8 @@ defmodule Ecto.Adapter.Queryable do
   def plan_query(operation, adapter, queryable) do
     query = Ecto.Queryable.to_query(queryable)
     {query, params, _key} = Ecto.Query.Planner.plan(query, operation, adapter)
+    {cast_params, dump_params} = Enum.unzip(params)
     {query, _} = Ecto.Query.Planner.normalize(query, operation, adapter, 0)
-    {query, params}
+    {query, cast_params, dump_params}
   end
 end
diff --git a/.deps/ecto/lib/ecto/association.ex b/.deps/ecto/lib/ecto/association.ex
@@ -441,10 +441,11 @@ defmodule Ecto.Association do
   def validate_defaults!(_module, _name, defaults) when is_list(defaults),
     do: defaults

-  def validate_defaults!(_module, name, defaults),
-    do: raise ArgumentError,
-      "expected defaults for #{inspect name} to be a keyword list " <>
-      "or a {module, fun, args} tuple, got: `#{inspect defaults}`"
+  def validate_defaults!(_module, name, defaults) do
+    raise ArgumentError,
+      "expected defaults for #{inspect name} to be a keyword list " <>
+      "or a {module, fun, args} tuple, got: `#{inspect defaults}`"
+  end

   @doc """
   Validates `preload_order` for association named `name`.
@@ -1266,12 +1267,14 @@ defmodule Ecto.Association.ManyToMany do
     # We only need to join in the "join table". Preload and Ecto.assoc expressions can then filter
     # by &1.join_owner_key in ^... to filter down to the associated entries in the related table.
-    from(q in (query || queryable),
-      join: j in ^join_through, on: field(q, ^related_key) == field(j, ^join_related_key),
-      where: field(j, ^join_owner_key) in type(^values, {:in, ^owner_key_type})
-    )
-    |> Ecto.Association.combine_assoc_query(assoc.where)
-    |> Ecto.Association.combine_joins_query(assoc.join_where, 1)
+    query =
+      from(q in (query || queryable),
+        join: j in ^join_through, on: field(q, ^related_key) == field(j, ^join_related_key),
+        where: field(j, ^join_owner_key) in type(^values, {:in, ^owner_key_type})
+      )
+      |> Ecto.Association.combine_assoc_query(assoc.where)
+
+    Ecto.Association.combine_joins_query(query, assoc.join_where, length(query.joins))
   end

   @impl true
diff --git a/.deps/ecto/lib/ecto/changeset.ex b/.deps/ecto/lib/ecto/changeset.ex
@@ -39,7 +39,7 @@ defmodule Ecto.Changeset do
   Some validations may happen against the database but
   they are inherently unsafe. Those validations start with a `unsafe_`
-  prefix, such as `unsafe_validate_unique/3`.
+  prefix, such as `unsafe_validate_unique/4`.

   On the other hand, constraints rely on the database and are always safe.
   As a consequence, validations are always checked before constraints.
@@ -109,10 +109,14 @@ defmodule Ecto.Changeset do
   regarding empty values. For example, if you are gathering data to be cast
   from the command line or through an HTML form or any other text-based
   format, it is likely those means cannot express nil values. For
-  those reasons, changesets include the concept of empty values, which are
-  values that will be automatically converted to the field's default value
-  on `cast/4`. Those values are stored in the changeset `empty_values` field
-  and default to `[""]`. You can also pass the `:empty_values` option to
+  those reasons, changesets include the concept of empty values.
+
+  When applying changes using `cast/4`, an empty value will be automatically
+  converted to the field's default value. If the field is an array type, any
+  empty value inside the array will be removed.
+
+  Empty values are stored in the changeset's `:empty_values` field and
+  default to `[""]`. You can also pass the `:empty_values` option to
   `cast/4` in case you want to change how a particular `cast/4` work.

   ## Associations, embeds and on replace
@@ -163,7 +167,7 @@ defmodule Ecto.Changeset do
     if the entry still exists

   The `:delete` and `:delete_if_exists` options must be used carefully as they allow
-  users to delete any associated data by simply not sending the associated data.
+  users to delete any associated data by simply setting it to `nil` or an empty list.
   If you need deletion, it is often preferred to add a separate boolean virtual field
   in the schema and manually mark the changeset for deletion if the `:delete` field is
   set in the params, as in the example below. Note that we don't call `cast/4` in this
@@ -238,7 +242,8 @@ defmodule Ecto.Changeset do
   This means that when working with changesets that are not meant to be
   persisted to the database, such as schemaless changesets, you may need
   to explicitly set the action to one specific value. Frameworks such as
-  Phoenix use the action value to define how HTML forms should act.
+  Phoenix [use the action value to define how HTML forms should
+  act](https://hexdocs.pm/phoenix_html/Phoenix.HTML.Form.html#module-a-note-on-errors).

   Instead of setting the action manually, you may use `apply_action/2` that
   emulates operations such as `c:Ecto.Repo.insert`. `apply_action/2` will return
@@ -275,9 +280,11 @@ defmodule Ecto.Changeset do
   value `**redacted**`.
   """

+  require Logger
   require Ecto.Query
   alias __MODULE__
   alias Ecto.Changeset.Relation
+  alias Ecto.Schema.Metadata

   @empty_values [""]

@@ -429,7 +436,9 @@ defmodule Ecto.Changeset do
   During casting, all `permitted` parameters whose values match the specified
   type information will have their key name converted to an atom and stored
   together with the value as a change in the `:changes` field of the changeset.
-  All parameters that are not explicitly permitted are ignored.
+  If the cast value matches the current value for the field, it will not be
+  included in `:changes` unless the `:force_changes: true` option is
+  provided. All parameters that are not explicitly permitted are ignored.

   If casting of all fields is successful, the changeset is returned as valid.

@@ -439,7 +448,10 @@ defmodule Ecto.Changeset do
   ## Options

   * `:empty_values` - a list of values to be considered as empty when casting.
-    Empty values are always replaced by the default value of the respective key. Defaults to `[""]`
+    Empty values are always replaced by the default value of the respective field.
+    If the field is an array type, any empty value inside of the array will be removed.
+    Defaults to `[""]`
+  * `:force_changes` - a boolean indicating whether to include values that don't alter the current data in `:changes`. Defaults to `false`

   ## Examples

@@ -448,6 +460,11 @@ defmodule Ecto.Changeset do
       ...> Repo.update!(changeset)
       ...> end

+      iex> params = %{title: "", topics: [""]}
+      iex> changeset = cast(post, params, [:topics, :title], empty_values: ["", []])
+      iex> changeset.changes
+      %{title: nil, topics: nil}
+
   Passing a changeset as the first argument:

       iex> changeset = cast(post, %{title: "Hello"}, [:title])
@@ -509,6 +526,7 @@ defmodule Ecto.Changeset do
   defp cast(%{} = data, %{} = types, %{} = changes, %{} = params, permitted, opts) when is_list(permitted) do
     empty_values = Keyword.get(opts, :empty_values, @empty_values)
+    force? = Keyword.get(opts, :force_changes, false)
     params = convert_params(params)

     defaults = case data do
@@ -518,7 +536,7 @@ defmodule Ecto.Changeset do
     {changes, errors, valid?} =
       Enum.reduce(permitted, {changes, [], true},
-                  &process_param(&1, params, types, data, empty_values, defaults, &2))
+                  &process_param(&1, params, types, data, empty_values, defaults, force?, &2))

     %Changeset{params: params, data: data, valid?: valid?,
                errors: Enum.reverse(errors), changes: changes, types: types}
@@ -529,7 +547,7 @@ defmodule Ecto.Changeset do
       message: "expected params to be a :map, got: `#{inspect params}`"
   end

-  defp process_param(key, params, types, data, empty_values, defaults, {changes, errors, valid?}) do
+  defp process_param(key, params, types, data, empty_values, defaults, force?, {changes, errors, valid?}) do
     {key, param_key} = cast_key(key)
     type = cast_type!(types, key)

       _ -> Map.get(data, key)
     end

-    case cast_field(key, param_key, type, params, current, empty_values, defaults, valid?) do
+    case cast_field(key, param_key, type, params, current, empty_values, defaults, force?, valid?) do
       {:ok, value, valid?} ->
         {Map.put(changes, key, value), errors, valid?}
       :missing ->
@@ -571,16 +589,17 @@ defmodule Ecto.Changeset do
   defp cast_key(key) when is_atom(key),
     do: {key, Atom.to_string(key)}

-  defp cast_key(key),
-    do: raise ArgumentError, "cast/3 expects a list of atom keys, got key: `#{inspect key}`"
+  defp cast_key(key) do
+    raise ArgumentError, "cast/3 expects a list of atom keys, got key: `#{inspect key}`"
+  end

-  defp cast_field(key, param_key, type, params, current, empty_values, defaults, valid?) do
+  defp cast_field(key, param_key, type, params, current, empty_values, defaults, force?, valid?) do
     case params do
       %{^param_key => value} ->
         value = filter_empty_values(type, value, empty_values, defaults, key)
         case Ecto.Type.cast(type, value) do
           {:ok, value} ->
-            if Ecto.Type.equal?(type, current, value) do
+            if not force? and Ecto.Type.equal?(type, current, value) do
               :missing
             else
               {:ok, value, valid?}
@@ -662,7 +681,7 @@ defmodule Ecto.Changeset do
   from `changeset.params`. Those parameters are expected to be a map with
   attributes, similar to the ones passed to `cast/4`. Once parameters are
   retrieved, `cast_assoc/3` will match those
-  parameters with the associations already in the changeset record.
+  parameters with the associations already in the changeset data.

   Once `cast_assoc/3` is called, Ecto will compare each parameter
   with the user's already preloaded addresses and act as follows:
@@ -683,9 +702,12 @@ defmodule Ecto.Changeset do
     be invoked (see the "On replace" section on the module documentation)

   Every time the `MyApp.Address.changeset/2` function is invoked, it must
-  return a changeset. Once the parent changeset is given to an `Ecto.Repo`
-  function, all entries will be inserted/updated/deleted within the same
-  transaction.
+  return a changeset. This changeset will always be included under `changes`
+  of the parent changeset, even if there are no changes. This is done for
+  reflection purposes, allowing developers to introspect validations and
+  other metadata from the association. Once the parent changeset is given
+  to an `Ecto.Repo` function, all entries will be inserted/updated/deleted
+  within the same transaction.

   Note developers are allowed to explicitly set the `:action` field of a
   changeset to instruct Ecto how to act in certain situations. Let's suppose
@@ -1539,6 +1561,8 @@ defmodule Ecto.Changeset do

       iex> {:ok, data} = apply_action(changeset, :update)

+      iex> {:ok, data} = apply_action(changeset, :my_action)
+
       iex> {:error, changeset} = apply_action(changeset, :update)
       %Ecto.Changeset{action: :update}
   """
@@ -1838,11 +1862,6 @@ defmodule Ecto.Changeset do
     * `:message` - the message in case the constraint check fails,
       defaults to "has already been taken".

-    * `:match` - how the changeset constraint name is matched against the
-      repo constraint, may be `:exact` or `:suffix`. Defaults to `:exact`.
-      `:suffix` matches any repo constraint which `ends_with?` `:name`
-      to this changeset constraint.
-
     * `:error_key` - the key to which changeset error will be added when
       check fails, defaults to the first field name of the given list of
       fields.
@@ -1867,16 +1886,33 @@ defmodule Ecto.Changeset do
   """
   @spec unsafe_validate_unique(t, atom | [atom, ...], Ecto.Repo.t, Keyword.t) :: t
-  def unsafe_validate_unique(changeset, fields, repo, opts \\ []) when is_list(opts) do
-    fields = List.wrap(fields)
+  def unsafe_validate_unique(%Changeset{} = changeset, fields, repo, opts \\ []) when is_list(opts) do
     {repo_opts, opts} = Keyword.pop(opts, :repo_opts, [])
-    {validations, schema} =
-      case changeset do
-        %Ecto.Changeset{validations: validations, data: %schema{}} ->
-          {validations, schema}
-        %Ecto.Changeset{} ->
-          raise ArgumentError, "unsafe_validate_unique/4 does not work with schemaless changesets"
+    %{data: data, validations: validations} = changeset
+
+    unless is_struct(data) and function_exported?(data.__struct__, :__schema__, 1) do
+      raise ArgumentError,
+            "unsafe_validate_unique/4 does not work with schemaless changesets, got #{inspect(data)}"
+    end
+
+    schema =
+      case {changeset.data, opts[:query]} do
+        # regular schema
+        {%schema{__meta__: %Metadata{}}, _} ->
+          schema
+
+        # embedded schema with base query
+        {%schema{}, base_query} when base_query != nil ->
+          schema
+
+        # embedded schema without base query
+        {data, _} ->
+          raise ArgumentError,
+                "unsafe_validate_unique/4 does not work with embedded schemas unless " <>
+                  "the `:query` option is specified, got: #{inspect(data)}"
       end
+
+    fields = List.wrap(fields)
     changeset = %{changeset | validations: [{hd(fields), {:unsafe_unique, fields: fields}} | validations]}

     where_clause = for field <- fields do
@@ -1892,7 +1928,7 @@ defmodule Ecto.Changeset do
     # If we don't have values for all fields, we can't query for uniqueness
     any_nil_values_for_fields? = Enum.any?(where_clause, &(&1 |> elem(1) |> is_nil()))

-    if unrelated_changes? || any_nil_values_for_fields? || any_prior_errors_for_fields? do
+    if unrelated_changes? or any_nil_values_for_fields? or any_prior_errors_for_fields? do
       changeset
     else
       query =
@@ -2503,8 +2539,15 @@ defmodule Ecto.Changeset do
       put_in(changeset.changes[field], incrementer.(current))
     end)

-    changeset = put_in(changeset.filters[field], current)
-    changeset
+    if is_nil(current) do
+      Logger.warn """
+      the current value of `#{field}` is `nil` and will not be used as a filter for optimistic
+      locking. To ensure `#{field}` is never `nil`, consider setting a default value.
+      """
+      changeset
+    else
+      put_in(changeset.filters[field], current)
+    end
   end

   # increment_with_rollover expect to be used with lock_version set as :integer in db schema
@@ -2598,11 +2641,13 @@ defmodule Ecto.Changeset do
       cast(user, params, [:age])
       |> check_constraint(:age, name: :age_must_be_positive)

-  Now, when invoking `c:Ecto.Repo.insert/2` or `c:Ecto.Repo.update/2`, if the
-  age is not positive, it will be converted into an error and
-  `{:error, changeset}` returned by the repository. Note that the error
-  will occur only after hitting the database so it will not be visible
-  until all other validations pass.
+  Now, when invoking `c:Ecto.Repo.insert/2` or `c:Ecto.Repo.update/2`,
+  if the age is not positive, the underlying operation will fail
+  but Ecto will convert the database exception into a changeset error
+  and return an `{:error, changeset}` tuple. Note that the error will
+  occur only after hitting the database, so it will not be visible
+  until all other validations pass. If the constraint fails inside a
+  transaction, the transaction will be marked as aborted.

   ## Options
@@ -2642,11 +2687,13 @@ defmodule Ecto.Changeset do
       cast(user, params, [:email])
       |> unique_constraint(:email)

-  Now, when invoking `c:Ecto.Repo.insert/2` or `c:Ecto.Repo.update/2`, if the
-  email already exists, it will be converted into an error and
-  `{:error, changeset}` returned by the repository. Note that the error
-  will occur only after hitting the database so it will not be visible
-  until all other validations pass.
+  Now, when invoking `c:Ecto.Repo.insert/2` or `c:Ecto.Repo.update/2`,
+  if the email already exists, the underlying operation will fail but
+  Ecto will convert the database exception into a changeset error and
+  return an `{:error, changeset}` tuple. Note that the error will occur
+  only after hitting the database, so it will not be visible until all
+  other validations pass. If the constraint fails inside a transaction,
+  the transaction will be marked as aborted.

   ## Options
@@ -2784,9 +2831,13 @@ defmodule Ecto.Changeset do
       cast(comment, params, [:post_id])
       |> foreign_key_constraint(:post_id)

-  Now, when invoking `c:Ecto.Repo.insert/2` or `c:Ecto.Repo.update/2`, if the
-  associated post does not exist, it will be converted into an
-  error and `{:error, changeset}` returned by the repository.
+  Now, when invoking `c:Ecto.Repo.insert/2` or `c:Ecto.Repo.update/2`,
+  if the associated post does not exist, the underlying operation will
+  fail but Ecto will convert the database exception into a changeset
+  error and return an `{:error, changeset}` tuple. Note that the error
+  will occur only after hitting the database, so it will not be visible
+  until all other validations pass. If the constraint fails inside a
+  transaction, the transaction will be marked as aborted.

   ## Options
@@ -2977,10 +3028,14 @@ defmodule Ecto.Changeset do
   defp get_source(%{data: %{__meta__: %{source: source}}}) when is_binary(source),
     do: source
-  defp get_source(%{data: data}), do:
+
+  defp get_source(%{data: data}) do
     raise ArgumentError, "cannot add constraint to changeset because it does not have a source, got: #{inspect data}"
-  defp get_source(item), do:
+  end
+
+  defp get_source(item) do
     raise ArgumentError, "cannot add constraint because a changeset was not supplied, got: #{inspect item}"
+  end

   defp get_assoc(%{types: types}, assoc) do
     case Map.fetch(types, assoc) do
diff --git a/.deps/ecto/lib/ecto/changeset/relation.ex b/.deps/ecto/lib/ecto/changeset/relation.ex
@@ -537,7 +537,7 @@ defmodule Ecto.Changeset.Relation do
     # This is partly reimplementing the logic behind put_relation
     # in Ecto.Changeset but we need to do it in a way where we have
     # control over the current value.
-    value = load!(struct, Map.get(struct, field))
+    value = not_loaded_to_empty(Map.get(struct, field))
     empty = empty(embed_or_assoc)
     case change(embed_or_assoc, value, empty) do
       {:ok, change, _} when change != empty ->
@@ -562,4 +562,9 @@ defmodule Ecto.Changeset.Relation do
       _ -> %{changeset | errors: errors ++ changeset.errors, valid?: false, changes: changes}
     end
   end
+
+  defp not_loaded_to_empty(%NotLoaded{__cardinality__: cardinality}),
+    do: cardinality_to_empty(cardinality)
+
+  defp not_loaded_to_empty(loaded), do: loaded
 end
diff --git a/.deps/ecto/lib/ecto/embedded.ex b/.deps/ecto/lib/ecto/embedded.ex
@@ -283,4 +283,8 @@ defmodule Ecto.Embedded do
   def build(%Embedded{related: related}, _owner) do
     related.__struct__
   end
+
+  def preload_info(_embed) do
+    :embed
+  end
 end
diff --git a/.deps/ecto/lib/ecto/enum.ex b/.deps/ecto/lib/ecto/enum.ex
@@ -49,13 +49,35 @@ defmodule Ecto.Enum do
     end
   end

-  you can call `mappings/2` like this:
+  You can call `mappings/2` like this:

       Ecto.Enum.mappings(EnumSchema, :my_enum)
       #=> [foo: "foo", bar: "bar", baz: "baz"]

   If you want the values only, you can use `Ecto.Enum.values/2`, and if you
   want the dump values only, you can use `Ecto.Enum.dump_values/2`.
+
+  ## Embeds
+
+  `Ecto.Enum` allows to customize how fields are dumped within embeds through the
+  `:embed_as` option. Two alternatives are supported: `:values`, which will save
+  the enum keys (and not their respective mapping), and `:dumped`, which will save
+  the dumped value. The default is `:values`. For example, assuming the following
+  schema:
+
+      defmodule EnumSchema do
+        use Ecto.Schema
+
+        schema "my_schema" do
+          embeds_one :embed, Embed do
+            field :embed_as_values, Ecto.Enum, values: [foo: 1, bar: 2], embed_as: :values
+            field :embed_as_dump, Ecto.Enum, values: [foo: 1, bar: 2], embed_as: :dump
+          end
+        end
+      end
+
+  The `:embed_as_values` field value will save `:foo | :bar`, while the
+  `:embed_as_dump` field value will save as `1 | 2`.
""" use Ecto.ParameterizedType @@ -97,7 +119,29 @@ defmodule Ecto.Enum do on_dump = Map.new(mappings) on_cast = Map.new(mappings, fn {key, _} -> {Atom.to_string(key), key} end) - %{on_load: on_load, on_dump: on_dump, on_cast: on_cast, mappings: mappings, type: type} + embed_as = + case Keyword.get(opts, :embed_as, :values) do + :values -> + :self + + :dumped -> + :dump + + other -> + raise ArgumentError, """ + the `:embed_as` option for `Ecto.Enum` accepts either `:values` or `:dumped`, + received: `#{inspect(other)}` + """ + end + + %{ + on_load: on_load, + on_dump: on_dump, + on_cast: on_cast, + mappings: mappings, + embed_as: embed_as, + type: type + } end defp validate_unique!(values) do @@ -162,7 +206,7 @@ defmodule Ecto.Enum do def equal?(a, b, _params), do: a == b @impl true - def embed_as(_, _), do: :self + def embed_as(_, %{embed_as: embed_as}), do: embed_as @doc "Returns the possible values for a given schema and field" @spec values(module, atom) :: [atom()] diff --git a/.deps/ecto/lib/ecto/multi.ex b/.deps/ecto/lib/ecto/multi.ex @@ -406,6 +406,9 @@ defmodule Ecto.Multi do Ecto.Multi.new() |> Ecto.Multi.one(:post, Post) + |> Ecto.Multi.one(:author, fn %{post: post} -> + from(a in Author, where: a.id == ^post.author_id) + end) |> MyApp.Repo.transaction() """ @spec one( @@ -434,6 +437,10 @@ defmodule Ecto.Multi do Ecto.Multi.new() |> Ecto.Multi.all(:all, Post) |> MyApp.Repo.transaction() + + Ecto.Multi.new() + |> Ecto.Multi.all(:all, fn _changes -> Post end) + |> MyApp.Repo.transaction() """ @spec all( t, @@ -857,12 +864,6 @@ defmodule Ecto.Multi do end end - defp operation_fun({:one!, fun}, opts) do - fn repo, changes -> - {:ok, repo.one!(fun.(changes), opts)} - end - end - defp operation_fun({:all, fun}, opts) do fn repo, changes -> {:ok, repo.all(fun.(changes), opts)} diff --git a/.deps/ecto/lib/ecto/parameterized_type.ex b/.deps/ecto/lib/ecto/parameterized_type.ex @@ -40,7 +40,7 @@ defmodule Ecto.ParameterizedType do def cast(data, params) do ... 
- cast_data + {:ok, cast_data} end def load(data, _loader, params) do diff --git a/.deps/ecto/lib/ecto/query.ex b/.deps/ecto/lib/ecto/query.ex @@ -385,7 +385,7 @@ defmodule Ecto.Query do defmodule FromExpr do @moduledoc false - defstruct [:source, :as, :prefix, hints: []] + defstruct [:source, :file, :line, :as, :prefix, params: [], hints: []] end defmodule DynamicExpr do @@ -405,7 +405,7 @@ defmodule Ecto.Query do defmodule SelectExpr do @moduledoc false - defstruct [:expr, :file, :line, :fields, params: [], take: %{}] + defstruct [:expr, :file, :line, :fields, params: [], take: %{}, subqueries: [], aliases: %{}] end defmodule JoinExpr do @@ -423,7 +423,7 @@ defmodule Ecto.Query do # * value is the tagged value # * tag is the directly tagged value, like Ecto.UUID # * type is the underlying tag type, like :string - defstruct [:value, :tag, :type] + defstruct [:tag, :type, :value] end @type t :: %__MODULE__{} @@ -490,7 +490,7 @@ defmodule Ecto.Query do order_by = [ asc: :some_field, - desc: dynamic([p], fragment("?>>?", p.another_field, "json_key")) + desc: dynamic([p], fragment("?->>?", p.another_field, "json_key")) ] from query, order_by: ^order_by @@ -513,7 +513,7 @@ defmodule Ecto.Query do group_by = [ :some_field, - dynamic([p], fragment("?>>?", p.another_field, "json_key")) + dynamic([p], fragment("?->>?", p.another_field, "json_key")) ] from query, group_by: ^group_by @@ -529,6 +529,46 @@ defmodule Ecto.Query do from query, group_by: ^[:some_field, dynamic(...)] + ## `select` and `select_merge` + + Dynamics can be inside maps interpolated at the root of a + `select` or `select_merge`. For example, you can write: + + fields = %{ + period: dynamic([p], p.month), + metric: dynamic([p], p.distance) + } + + from query, select: ^fields + + As with `where` and friends, it is not possible to pass dynamics + outside of a root. For example, this won't work: + + from query, select: %{field: ^dynamic(...)} + + But this will: + + from query, select: ^%{field: dynamic(...)} + + Maps with dynamics can also be merged into existing `select` structures, + enabling a variety of possibilities for partially dynamic selects: + + metric = dynamic([p], p.distance) + + from query, select: [:period, :metric], select_merge: ^%{metric: metric} + + Aliasing fields with `selected_as/2` and referencing them with `selected_as/1` + is also allowed: + + fields = %{ + period: dynamic([p], selected_as(p.month, :month)), + metric: dynamic([p], p.distance) + } + + order = dynamic(selected_as(:month)) + + from query, select: ^fields, order_by: ^order + ## Updates A `dynamic` is also supported inside updates, for example: @@ -751,8 +791,8 @@ defmodule Ecto.Query do It can either be a keyword query or a query expression. If it is a keyword query the first argument must be - either an `in` expression, or a value that implements - the `Ecto.Queryable` protocol. If the query needs a + either an `in` expression, a value that implements + the `Ecto.Queryable` protocol, or an `Ecto.Query.API.fragment/1`. If the query needs a reference to the data source in any other part of the expression, then an `in` must be used to create a reference variable. The second argument should be a keyword query @@ -763,14 +803,31 @@ defmodule Ecto.Query do a value that implements the `Ecto.Queryable` protocol and the second argument the expression. 
- ## Keywords example + ## Keywords examples + # `in` expression from(c in City, select: c) - ## Expressions example + # Ecto.Queryable + from(City, limit: 1) + # Fragment + from(f in fragment("generate_series(?, ?) as x", ^0, ^100000), select f.x) + + ## Expressions examples + + # Schema City |> select([c], c) + # Source + "cities" |> select([c], c) + + # Source with schema + {"cities", Source} |> select([c], c) + + # Ecto.Query + from(c in Cities) |> select([c], c) + ## Examples def paginate(query, page, size) do @@ -1239,6 +1296,11 @@ defmodule Ecto.Query do If you want a map with only the selected fields to be returned. + To select a struct but omit only given fields, you can + override them with `nil` or another default value: + + from(city in City, select: %{city | geojson: nil, text: "<redacted>"}) + For more information, read the docs for `Ecto.Query.API.struct/2` and `Ecto.Query.API.map/2`. @@ -1250,7 +1312,17 @@ defmodule Ecto.Query do City |> select([:name]) City |> select([c], struct(c, [:name])) City |> select([c], map(c, [:name])) + City |> select([c], %{c | geojson: nil, text: "<redacted>"}) + + ## Dynamic parts + + Dynamics can be part of a `select` as values in a map that must be interpolated + at the root level: + + period = if monthly?, do: dynamic([p], p.month), else: dynamic([p], p.date) + metric = if distance?, do: dynamic([p], p.distance), else: dynamic([p], p.time) + from(c in City, select: ^%{period: period, metric: metric}) """ defmacro select(query, binding \\ [], expr) do Builder.Select.build(:select, query, binding, expr, __CALLER__) @@ -1310,6 +1382,10 @@ defmodule Ecto.Query do `select_merge` cannot be used to set fields in associations, as associations are always loaded later, overriding any previous value. + + Dynamics can be part of a `select_merge` as values in a map that must be + interpolated at the root level. The rules for merging detailed above apply. + This allows merging dynamic values into previsouly selected maps and structs. """ defmacro select_merge(query, binding \\ [], expr) do Builder.Select.build(:merge, query, binding, expr, __CALLER__) @@ -1487,22 +1563,22 @@ defmodule Ecto.Query do It's also possible to order by an aliased or calculated column: - from(c in City, - select: %{ - name: c.name, - total_population: - fragment( - "COALESCE(?, ?) + ? AS total_population", - c.animal_population, - 0, - c.human_population - ) - }, - order_by: [ - # based on `AS total_population` in the previous fragment - {:desc, fragment("total_population")} - ] - ) + from(c in City, + select: %{ + name: c.name, + total_population: + fragment( + "COALESCE(?, ?) + ? AS total_population", + c.animal_population, + 0, + c.human_population + ) + }, + order_by: [ + # based on `AS total_population` in the previous fragment + {:desc, fragment("total_population")} + ] + ) ## Expressions examples @@ -1526,18 +1602,35 @@ defmodule Ecto.Query do consider using `union_all/2`. Note that the operations `order_by`, `limit` and `offset` of the - current `query` apply to the result of the union. + current `query` apply to the result of the union. `order_by` must + be specified in one of the following ways, since the union of two + or more queries is not automatically aliased: - ## Keywords example + - Use `Ecto.Query.API.fragment/1` to pass an `order_by` statement that directly access the union fields. + - Wrap the union in a subquery and refer to the binding of the subquery. 
+ + ## Keywords examples + # Unordered result supplier_query = from s in Supplier, select: s.city from c in Customer, select: c.city, union: ^supplier_query - ## Expressions example + # Ordered result + supplier_query = from s in Supplier, select: s.city + union_query = from c in Customer, select: c.city, union: ^supplier_query + from s in subquery(union_query), order_by: s.city + + ## Expressions examples + # Unordered result supplier_query = Supplier |> select([s], s.city) Customer |> select([c], c.city) |> union(^supplier_query) + # Ordered result + customer_query = Customer |> select([c], c.city) |> order_by(fragment("city")) + supplier_query = Supplier |> select([s], s.city) + union(customer_query, ^supplier_query) + """ defmacro union(query, other_query) do Builder.Combination.build(:union, query, other_query, __CALLER__) @@ -1550,17 +1643,34 @@ defmodule Ecto.Query do must be exactly the same, with the same types in the same order. Note that the operations `order_by`, `limit` and `offset` of the - current `query` apply to the result of the union. + current `query` apply to the result of the union. `order_by` must + be specified in one of the following ways, since the union of two + or more queries is not automatically aliased: - ## Keywords example + - Use `Ecto.Query.API.fragment/1` to pass an `order_by` statement that directly access the union fields. + - Wrap the union in a subquery and refer to the binding of the subquery. + ## Keywords examples + + # Unordered result supplier_query = from s in Supplier, select: s.city from c in Customer, select: c.city, union_all: ^supplier_query - ## Expressions example + # Ordered result + supplier_query = from s in Supplier, select: s.city + union_all_query = from c in Customer, select: c.city, union_all: ^supplier_query + from s in subquery(union_all_query), order_by: s.city + ## Expressions examples + + # Unordered result supplier_query = Supplier |> select([s], s.city) Customer |> select([c], c.city) |> union_all(^supplier_query) + + # Ordered result + customer_query = Customer |> select([c], c.city) |> order_by(fragment("city")) + supplier_query = Supplier |> select([s], s.city) + union_all(customer_query, ^supplier_query) """ defmacro union_all(query, other_query) do Builder.Combination.build(:union_all, query, other_query, __CALLER__) @@ -1579,17 +1689,34 @@ defmodule Ecto.Query do removing duplicate rows consider using `except_all/2`. Note that the operations `order_by`, `limit` and `offset` of the - current `query` apply to the result of the set difference. + current `query` apply to the result of the set difference. `order_by` + must be specified in one of the following ways, since the set difference + of two or more queries is not automatically aliased: - ## Keywords example + - Use `Ecto.Query.API.fragment/1` to pass an `order_by` statement that directly access the set difference fields. + - Wrap the set difference in a subquery and refer to the binding of the subquery. 
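Combining the notes above: because `order_by`, `limit` and `offset` on the current query apply to the result of the whole union, a single query can union, order via `fragment/1`, and cap the result. A sketch reusing the `Supplier`/`Customer` schemas from the examples:

    supplier_query = from s in Supplier, select: s.city

    from c in Customer,
      select: c.city,
      union: ^supplier_query,
      order_by: fragment("city"),
      limit: 10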
+ ## Keywords examples + + # Unordered result supplier_query = from s in Supplier, select: s.city from c in Customer, select: c.city, except: ^supplier_query - ## Expressions example + # Ordered result + supplier_query = from s in Supplier, select: s.city + except_query = from c in Customer, select: c.city, except: ^supplier_query + from s in subquery(except_query), order_by: s.city + ## Expressions examples + + # Unordered result supplier_query = Supplier |> select([s], s.city) Customer |> select([c], c.city) |> except(^supplier_query) + + # Ordered result + customer_query = Customer |> select([c], c.city) |> order_by(fragment("city")) + supplier_query = Supplier |> select([s], s.city) + except(customer_query, ^supplier_query) """ defmacro except(query, other_query) do Builder.Combination.build(:except, query, other_query, __CALLER__) @@ -1603,17 +1730,34 @@ defmodule Ecto.Query do types in the same order. Note that the operations `order_by`, `limit` and `offset` of the - current `query` apply to the result of the set difference. + current `query` apply to the result of the set difference. `order_by` + must be specified in one of the following ways, since the set difference + of two or more queries is not automatically aliased: - ## Keywords example + - Use `Ecto.Query.API.fragment/1` to pass an `order_by` statement that directly access the set difference fields. + - Wrap the set difference in a subquery and refer to the binding of the subquery. + + ## Keywords examples + # Unordered result supplier_query = from s in Supplier, select: s.city from c in Customer, select: c.city, except_all: ^supplier_query - ## Expressions example + # Ordered result + supplier_query = from s in Supplier, select: s.city + except_all_query = from c in Customer, select: c.city, except_all: ^supplier_query + from s in subquery(except_all_query), order_by: s.city + + ## Expressions examples + # Unordered result supplier_query = Supplier |> select([s], s.city) Customer |> select([c], c.city) |> except_all(^supplier_query) + + # Ordered result + customer_query = Customer |> select([c], c.city) |> order_by(fragment("city")) + supplier_query = Supplier |> select([s], s.city) + except_all(customer_query, ^supplier_query) """ defmacro except_all(query, other_query) do Builder.Combination.build(:except_all, query, other_query, __CALLER__) @@ -1632,17 +1776,34 @@ defmodule Ecto.Query do removing duplicate rows consider using `intersect_all/2`. Note that the operations `order_by`, `limit` and `offset` of the - current `query` apply to the result of the set difference. + current `query` apply to the result of the set difference. `order_by` + must be specified in one of the following ways, since the intersection + of two or more queries is not automatically aliased: - ## Keywords example + - Use `Ecto.Query.API.fragment/1` to pass an `order_by` statement that directly access the intersection fields. + - Wrap the intersection in a subquery and refer to the binding of the subquery. 
+  ## Keywords examples
+
+      # Unordered result
       supplier_query = from s in Supplier, select: s.city
       from c in Customer, select: c.city, intersect: ^supplier_query
 
-  ## Expressions example
+      # Ordered result
+      supplier_query = from s in Supplier, select: s.city
+      intersect_query = from c in Customer, select: c.city, intersect: ^supplier_query
+      from s in subquery(intersect_query), order_by: s.city
 
+  ## Expressions examples
+
+      # Unordered result
       supplier_query = Supplier |> select([s], s.city)
       Customer |> select([c], c.city) |> intersect(^supplier_query)
+
+      # Ordered result
+      customer_query = Customer |> select([c], c.city) |> order_by(fragment("city"))
+      supplier_query = Supplier |> select([s], s.city)
+      intersect(customer_query, ^supplier_query)
   """
   defmacro intersect(query, other_query) do
     Builder.Combination.build(:intersect, query, other_query, __CALLER__)
@@ -1656,17 +1817,34 @@ defmodule Ecto.Query do
   types in the same order.
 
   Note that the operations `order_by`, `limit` and `offset` of the
-  current `query` apply to the result of the set difference.
+  current `query` apply to the result of the set difference. `order_by`
+  must be specified in one of the following ways, since the intersection
+  of two or more queries is not automatically aliased:
 
-  ## Keywords example
+    - Use `Ecto.Query.API.fragment/1` to pass an `order_by` statement that directly accesses the intersection fields.
+    - Wrap the intersection in a subquery and refer to the binding of the subquery.
+
+  ## Keywords examples
 
+      # Unordered result
       supplier_query = from s in Supplier, select: s.city
       from c in Customer, select: c.city, intersect_all: ^supplier_query
 
-  ## Expressions example
+      # Ordered result
+      supplier_query = from s in Supplier, select: s.city
+      intersect_all_query = from c in Customer, select: c.city, intersect_all: ^supplier_query
+      from s in subquery(intersect_all_query), order_by: s.city
+
+  ## Expressions examples
 
+      # Unordered result
       supplier_query = Supplier |> select([s], s.city)
       Customer |> select([c], c.city) |> intersect_all(^supplier_query)
+
+      # Ordered result
+      customer_query = Customer |> select([c], c.city) |> order_by(fragment("city"))
+      supplier_query = Supplier |> select([s], s.city)
+      intersect_all(customer_query, ^supplier_query)
   """
   defmacro intersect_all(query, other_query) do
     Builder.Combination.build(:intersect_all, query, other_query, __CALLER__)
@@ -2114,6 +2292,65 @@ defmodule Ecto.Query do
   end
 
   @doc """
+  Applies a callback function to a query if it doesn't contain the given named binding.
+  Otherwise, returns the original query.
+
+  The callback function must accept a queryable and return an `Ecto.Query` struct
+  that contains the provided named binding, otherwise an error is raised. It can also
+  accept a second argument which is the atom representing the name of a binding.
+
+  For example, one might use this function as a convenience to conditionally add a new
+  named join to a query:
+
+      if has_named_binding?(query, :comments) do
+        query
+      else
+        join(query, :left, [p], c in assoc(p, :comments), as: :comments)
+      end
+
+  With this function it can be simplified to:
+
+      with_named_binding(query, :comments, fn query, binding ->
+        join(query, :left, [p], a in assoc(p, ^binding), as: ^binding)
+      end)
+
+  For more information on named bindings see "Named bindings" in this module doc or `has_named_binding?/2`.
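As a usage sketch (a hypothetical `Post` schema with a `:comments` association is assumed), the callback makes join helpers idempotent:

    def join_comments(query) do
      with_named_binding(query, :comments, fn query, binding ->
        join(query, :left, [p], c in assoc(p, ^binding), as: ^binding)
      end)
    end

    # the second call is a no-op: the named binding already exists
    Post |> join_comments() |> join_comments()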
+ """ + def with_named_binding(%Ecto.Query{} = query, key, fun) do + if has_named_binding?(query, key) do + query + else + query + |> apply_binding_callback(fun, key) + |> raise_on_invalid_callback_return(key) + end + end + + def with_named_binding(queryable, key, fun) do + queryable + |> Ecto.Queryable.to_query() + |> with_named_binding(key, fun) + end + + defp apply_binding_callback(query, fun, _key) when is_function(fun, 1), do: query |> fun.() + defp apply_binding_callback(query, fun, key) when is_function(fun, 2), do: query |> fun.(key) + defp apply_binding_callback(_query, fun, _key) do + raise ArgumentError, "callback function for with_named_binding/3 should accept one or two arguments, got: #{inspect(fun)}" + end + + defp raise_on_invalid_callback_return(%Ecto.Query{} = query, key) do + if has_named_binding?(query, key) do + query + else + raise RuntimeError, "callback function for with_named_binding/3 should create a named binding for key #{inspect(key)}" + end + end + + defp raise_on_invalid_callback_return(other, _key) do + raise RuntimeError, "callback function for with_named_binding/3 should return an Ecto.Query struct, got: #{inspect(other)}" + end + + @doc """ Reverses the ordering of the query. ASC columns become DESC columns (and vice-versa). If the query diff --git a/.deps/ecto/lib/ecto/query/api.ex b/.deps/ecto/lib/ecto/query/api.ex @@ -11,8 +11,8 @@ defmodule Ecto.Query.API do * Null check functions: `is_nil/1` * Aggregates: `count/0`, `count/1`, `avg/1`, `sum/1`, `min/1`, `max/1` * Date/time intervals: `datetime_add/3`, `date_add/3`, `from_now/2`, `ago/2` - * Inside select: `struct/2`, `map/2`, `merge/2` and literals (map, tuples, lists, etc) - * General: `fragment/1`, `field/2`, `type/2`, `as/1`, `parent_as/1` + * Inside select: `struct/2`, `map/2`, `merge/2`, `selected_as/2` and literals (map, tuples, lists, etc) + * General: `fragment/1`, `field/2`, `type/2`, `as/1`, `parent_as/1`, `selected_as/1` Note the functions in this module exist for documentation purposes and one should never need to invoke them directly. @@ -663,6 +663,11 @@ defmodule Ecto.Query.API do from p in Post, select: type(coalesce(p.cost, 0), :integer) + Or to type fields from a parent query using `parent_as/1`: + + child = from c in Comment, where: type(parent_as(:posts).id, :string) == c.text + from Post, as: :posts, inner_lateral_join: c in subquery(child), select: c.text + """ def type(interpolated_value, type), do: doc! [interpolated_value, type] @@ -682,6 +687,46 @@ defmodule Ecto.Query.API do """ def parent_as(binding), do: doc! [binding] + @doc """ + Refer to an alias of a selected value. + + This can be used to refer to aliases created using `selected_as/2`. If + the alias hasn't been created using `selected_as/2`, an error will be raised. + + Each database has its own rules governing which clauses can reference these aliases. + If an error is raised mentioning an unknown column, most likely the alias is being + referenced somewhere that is not allowed. Consult the documentation for the database + to ensure the alias is being referenced correctly. + """ + def selected_as(name), do: doc! [name] + + @doc """ + Creates an alias for the given selected value. + + When working with calculated values, an alias can be used to simplify + the query. Otherwise, the entire expression would need to be copied when + referencing it outside of select statements. 
+ + This comes in handy when, for instance, you would like to use the calculated + value in `Ecto.Query.group_by/3` or `Ecto.Query.order_by/3`: + + from p in Post, + select: %{ + posted: selected_as(p.posted, :date), + sum_visits: p.visits |> coalesce(0) |> sum() |> selected_as(:sum_visits) + }, + group_by: selected_as(:date), + order_by: selected_as(:sum_visits) + + The name of the alias must be an atom and it can only be used in the outer most + select expression, otherwise an error is raised. Please note that the alias name + does not have to match the key when `select` returns a map, struct or keyword list. + + Using this in conjunction with `selected_as/1` is recommended to ensure only defined aliases + are referenced. + """ + def selected_as(selected_value, name), do: doc! [selected_value, name] + defp doc!(_) do raise "the functions in Ecto.Query.API should not be invoked directly, " <> "they serve for documentation purposes only" diff --git a/.deps/ecto/lib/ecto/query/builder.ex b/.deps/ecto/lib/ecto/query/builder.ex @@ -41,6 +41,8 @@ defmodule Ecto.Query.Builder do ntile: {1, :integer} ] + @select_alias_dummy_value [] + @typedoc """ Quoted types store primitive types and types in the format {source, quoted}. The latter are handled directly in the planner, @@ -52,6 +54,17 @@ defmodule Ecto.Query.Builder do """ @type quoted_type :: Ecto.Type.primitive | {non_neg_integer, atom | Macro.t} + @typedoc """ + The accumulator during escape. + + If the subqueries field is available, subquery escaping must take place. + """ + @type acc :: %{ + optional(:subqueries) => list(Macro.t()), + optional(:take) => %{non_neg_integer => Macro.t()}, + optional(any) => any + } + @doc """ Smart escapes a query expression and extracts interpolated values in a map. @@ -61,8 +74,8 @@ defmodule Ecto.Query.Builder do with `^index` in the query where index is a number indexing into the map. """ - @spec escape(Macro.t, quoted_type | {:in, quoted_type} | {:out, quoted_type}, {list, term}, - Keyword.t, Macro.Env.t | {Macro.Env.t, fun}) :: {Macro.t, {list, term}} + @spec escape(Macro.t, quoted_type | {:in, quoted_type} | {:out, quoted_type}, {list, acc}, + Keyword.t, Macro.Env.t | {Macro.Env.t, fun}) :: {Macro.t, {list, acc}} def escape(expr, type, params_acc, vars, env) # var.x - where var is bound @@ -121,6 +134,10 @@ defmodule Ecto.Query.Builder do escape_with_type(access_expr, type, params_acc, vars, env) end + def escape({:type, _, [{{:., _, [{:parent_as, _, [_parent]}, _field]}, _, []} = expr, type]}, _type, params_acc, vars, env) do + escape_with_type(expr, type, params_acc, vars, env) + end + def escape({:type, meta, [expr, type]}, given_type, params_acc, vars, env) do case Macro.expand_once(expr, get_env(env)) do ^expr -> @@ -134,6 +151,7 @@ defmodule Ecto.Query.Builder do * an aggregation or window expression (avg, count, min, max, sum, over, filter) * a conditional expression (coalesce) * access/json paths (p.column[0].field) + * parent_as/1 (parent_as(:parent).field) Got: #{Macro.to_string(expr)} """ @@ -168,11 +186,13 @@ defmodule Ecto.Query.Builder do end # subqueries - def escape({:subquery, _, [expr]}, _, {params, subqueries}, _vars, _env) do + def escape({:subquery, _, [expr]}, _, {params, %{subqueries: subqueries} = acc}, _vars, _env) do subquery = quote(do: Ecto.Query.subquery(unquote(expr))) index = length(subqueries) - expr = {:subquery, index} # used both in ast and in parameters, as a placeholder. 
- {expr, {[expr | params], [subquery | subqueries]}} + # used both in ast and in parameters, as a placeholder. + expr = {:subquery, index} + acc = %{acc | subqueries: [subquery | subqueries]} + {expr, {[expr | params], acc}} end # interval @@ -386,6 +406,29 @@ defmodule Ecto.Query.Builder do {{:{}, [], [:over, [], [aggregate, window]]}, params_acc} end + def escape({:selected_as, _, [_expr, _name]}, _type, _params_acc, _vars, _env) do + error! """ + selected_as/2 can only be used at the root of a select statement. \ + If you are trying to use it inside of an expression, consider putting the \ + expression inside of `selected_as/2` instead. For instance, instead of: + + from p in Post, select: coalesce(selected_as(p.visits, :v), 0) + + use: + + from p in Post, select: selected_as(coalesce(p.visits, 0), :v) + """ + end + + def escape({:selected_as, _, [name]}, _type, params_acc, _vars, _env) when is_atom(name) do + expr = {:{}, [], [:selected_as, [], [name]]} + {expr, params_acc} + end + + def escape({:selected_as, _, [name]}, _type, _params_acc, _vars, _env) do + error! "selected_as/1 expects `name` to be an atom, got `#{inspect(name)}`" + end + def escape({quantifier, meta, [subquery]}, type, params_acc, vars, env) when quantifier in [:all, :any, :exists] do {subquery, params_acc} = escape({:subquery, meta, [subquery]}, type, params_acc, vars, env) {{:{}, [], [quantifier, [], [subquery]]}, params_acc} @@ -716,7 +759,7 @@ defmodule Ecto.Query.Builder do do: {find_var!(var, vars), field} def validate_type!(type, _vars, _env) do - error! "type/2 expects an alias, atom, initialized parameterized type or " <> + error! "type/2 expects an alias, atom, initialized parameterized type or " <> "source.field as second argument, got: `#{Macro.to_string(type)}`" end @@ -741,6 +784,12 @@ defmodule Ecto.Query.Builder do def escape_params(list), do: Enum.reverse(list) @doc """ + Escape the select alias map + """ + @spec escape_select_aliases(map()) :: Macro.t + def escape_select_aliases(%{} = aliases), do: {:%{}, [], Map.to_list(aliases)} + + @doc """ Escapes a variable according to the given binds. A escaped variable is represented internally as @@ -1163,6 +1212,33 @@ defmodule Ecto.Query.Builder do end @doc """ + Bump subqueries by the count of pre-existing subqueries. + """ + def bump_subqueries(expr, []), do: expr + + def bump_subqueries(expr, subqueries) do + len = length(subqueries) + + Macro.prewalk(expr, fn + {:subquery, counter} -> {:subquery, len + counter} + other -> other + end) + end + + @doc """ + Called by the select escaper at compile time and dynamic builder at runtime to track select aliases + """ + def add_select_alias(aliases, name) do + case aliases do + %{^name => _} -> + error! "the alias `#{inspect(name)}` has been specified more than once using `selected_as/2`" + + aliases -> + Map.put(aliases, name, @select_alias_dummy_value) + end + end + + @doc """ Applies a query at compilation time or at runtime. 
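The alias bookkeeping in `add_select_alias/2` above is what surfaces the duplicate-alias error at compile time; for example (a sketch, `Post` and its fields assumed):

    # raises: the alias `:v` has been specified more than once using `selected_as/2`
    from p in Post,
      select: %{a: selected_as(p.visits, :v), b: selected_as(p.clicks, :v)}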
This function is responsible for checking if a given query is an diff --git a/.deps/ecto/lib/ecto/query/builder/cte.ex b/.deps/ecto/lib/ecto/query/builder/cte.ex @@ -46,7 +46,7 @@ defmodule Ecto.Query.Builder.CTE do end def build_cte(_name, {:fragment, _, _} = fragment, env) do - {expr, {params, :acc}} = Builder.escape(fragment, :any, {[], :acc}, [], env) + {expr, {params, _acc}} = Builder.escape(fragment, :any, {[], %{}}, [], env) params = Builder.escape_params(params) quote do @@ -74,14 +74,31 @@ defmodule Ecto.Query.Builder.CTE do The callback applied by `build/4` to build the query. """ @spec apply(Ecto.Queryable.t, bitstring, Ecto.Queryable.t) :: Ecto.Query.t + # Runtime + def apply(%Ecto.Query{with_ctes: with_expr} = query, name, %_{} = with_query) do + %{query | with_ctes: apply_cte(with_expr, name, with_query)} + end + + # Compile def apply(%Ecto.Query{with_ctes: with_expr} = query, name, with_query) do - with_expr = with_expr || %Ecto.Query.WithExpr{} - queries = List.keystore(with_expr.queries, name, 0, {name, with_query}) - with_expr = %{with_expr | queries: queries} - %{query | with_ctes: with_expr} + update = quote do + Ecto.Query.Builder.CTE.apply_cte(unquote(with_expr), unquote(name), unquote(with_query)) + end + + %{query | with_ctes: update} end + # Runtime catch-all def apply(query, name, with_query) do apply(Ecto.Queryable.to_query(query), name, with_query) end + + @doc false + def apply_cte(nil, name, with_query) do + %Ecto.Query.WithExpr{queries: [{name, with_query}]} + end + + def apply_cte(%Ecto.Query.WithExpr{queries: queries} = with_expr, name, with_query) do + %{with_expr | queries: List.keystore(queries, name, 0, {name, with_query})} + end end diff --git a/.deps/ecto/lib/ecto/query/builder/distinct.ex b/.deps/ecto/lib/ecto/query/builder/distinct.ex @@ -8,13 +8,13 @@ defmodule Ecto.Query.Builder.Distinct do @doc """ Escapes a list of quoted expressions. - iex> escape(quote do true end, {[], :acc}, [], __ENV__) - {true, {[], :acc}} + iex> escape(quote do true end, {[], %{}}, [], __ENV__) + {true, {[], %{}}} - iex> escape(quote do [x.x, 13] end, {[], :acc}, [x: 0], __ENV__) + iex> escape(quote do [x.x, 13] end, {[], %{}}, [x: 0], __ENV__) {[asc: {:{}, [], [{:{}, [], [:., [], [{:{}, [], [:&, [], [0]]}, :x]]}, [], []]}, asc: 13], - {[], :acc}} + {[], %{}}} """ @spec escape(Macro.t, {list, term}, Keyword.t, Macro.Env.t) :: {Macro.t, {list, term}} @@ -54,7 +54,7 @@ defmodule Ecto.Query.Builder.Distinct do def build(query, binding, expr, env) do {query, binding} = Builder.escape_binding(query, binding, env) - {expr, {params, _}} = escape(expr, {[], :acc}, binding, env) + {expr, {params, _acc}} = escape(expr, {[], %{}}, binding, env) params = Builder.escape_params(params) distinct = quote do: %Ecto.Query.QueryExpr{ diff --git a/.deps/ecto/lib/ecto/query/builder/dynamic.ex b/.deps/ecto/lib/ecto/query/builder/dynamic.ex @@ -4,6 +4,7 @@ defmodule Ecto.Query.Builder.Dynamic do @moduledoc false alias Ecto.Query.Builder + alias Ecto.Query.Builder.Select @doc """ Builds a dynamic expression. 
@@ -11,13 +12,14 @@ defmodule Ecto.Query.Builder.Dynamic do @spec build([Macro.t], Macro.t, Macro.Env.t) :: Macro.t def build(binding, expr, env) do {query, vars} = Builder.escape_binding(quote(do: query), binding, env) - {expr, {params, subqueries}} = Builder.escape(expr, :any, {[], []}, vars, env) + {expr, {params, acc}} = escape(expr, {[], %{subqueries: [], aliases: %{}}}, vars, env) + aliases = Builder.escape_select_aliases(acc.aliases) params = Builder.escape_params(params) quote do %Ecto.Query.DynamicExpr{fun: fn query -> _ = unquote(query) - {unquote(expr), unquote(params), unquote(subqueries)} + {unquote(expr), unquote(params), unquote(Enum.reverse(acc.subqueries)), unquote(aliases)} end, binding: unquote(Macro.escape(binding)), file: unquote(env.file), @@ -25,11 +27,23 @@ defmodule Ecto.Query.Builder.Dynamic do end end + defp escape({:selected_as, _, [_, _]} = expr, _params_acc, vars, env) do + Select.escape(expr, vars, env) + end + + defp escape(expr, params_acc, vars, env) do + Builder.escape(expr, :any, params_acc, vars, {env, &escape_expansion/5}) + end + + defp escape_expansion(expr, _type, params_acc, vars, env) do + escape(expr, params_acc, vars, env) + end + @doc """ Expands a dynamic expression for insertion into the given query. """ def fully_expand(query, %{file: file, line: line, binding: binding} = dynamic) do - {expr, {binding, params, subqueries, _count}} = expand(query, dynamic, {binding, [], [], 0}) + {expr, {binding, params, subqueries, _aliases, _count}} = expand(query, dynamic, {binding, [], [], %{}, 0}) {expr, binding, Enum.reverse(params), Enum.reverse(subqueries), file, line} end @@ -40,8 +54,16 @@ defmodule Ecto.Query.Builder.Dynamic do list is not reversed. This is useful when the dynamic expression is given in the middle of an expression. 
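A sketch of the "dynamic in the middle of an expression" case handled here (hypothetical `Post` fields):

    published = dynamic([p], not is_nil(p.published_at))

    # ^published is expanded mid-expression; its parameters are spliced in place
    filter = dynamic([p], ^published and p.public)

    from p in Post, where: ^filter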
""" + def partially_expand(query, %{binding: binding} = dynamic, params, subqueries, aliases, count) do + {expr, {_binding, params, subqueries, aliases, count}} = + expand(query, dynamic, {binding, params, subqueries, aliases, count}) + + {expr, params, subqueries, aliases, count} + end + def partially_expand(kind, query, %{binding: binding} = dynamic, params, count) do - {expr, {_binding, params, subqueries, count}} = expand(query, dynamic, {binding, params, [], count}) + {expr, {_binding, params, subqueries, _aliases, count}} = + expand(query, dynamic, {binding, params, [], %{}, count}) if subqueries != [] do raise ArgumentError, "subqueries are not allowed in `#{kind}` expressions" @@ -50,27 +72,34 @@ defmodule Ecto.Query.Builder.Dynamic do {expr, params, count} end - defp expand(query, %{fun: fun}, {binding, params, subqueries, count}) do - {dynamic_expr, dynamic_params, dynamic_subqueries} = fun.(query) + defp expand(query, %{fun: fun}, {binding, params, subqueries, aliases, count}) do + {dynamic_expr, dynamic_params, dynamic_subqueries, dynamic_aliases} = fun.(query) + aliases = merge_aliases(aliases, dynamic_aliases) - Macro.postwalk(dynamic_expr, {binding, params, subqueries, count}, fn - {:^, meta, [ix]}, {binding, params, subqueries, count} -> + Macro.postwalk(dynamic_expr, {binding, params, subqueries, aliases, count}, fn + {:^, meta, [ix]}, {binding, params, subqueries, aliases, count} -> case Enum.fetch!(dynamic_params, ix) do {%Ecto.Query.DynamicExpr{binding: new_binding} = dynamic, _} -> binding = if length(new_binding) > length(binding), do: new_binding, else: binding - expand(query, dynamic, {binding, params, subqueries, count}) - + expand(query, dynamic, {binding, params, subqueries, aliases, count}) + param -> - {{:^, meta, [count]}, {binding, [param | params], subqueries, count + 1}} + {{:^, meta, [count]}, {binding, [param | params], subqueries, aliases, count + 1}} end - {:subquery, i}, {binding, params, subqueries, count} -> + {:subquery, i}, {binding, params, subqueries, aliases, count} -> subquery = Enum.fetch!(dynamic_subqueries, i) ix = length(subqueries) - {{:subquery, ix}, {binding, [{:subquery, ix} | params], [subquery | subqueries], count + 1}} + {{:subquery, ix}, {binding, [{:subquery, ix} | params], [subquery | subqueries], aliases, count + 1}} expr, acc -> {expr, acc} end) end + + defp merge_aliases(old_aliases, new_aliases) do + Enum.reduce(new_aliases, old_aliases, fn {alias, _}, aliases -> + Builder.add_select_alias(aliases, alias) + end) + end end diff --git a/.deps/ecto/lib/ecto/query/builder/filter.ex b/.deps/ecto/lib/ecto/query/builder/filter.ex @@ -18,33 +18,33 @@ defmodule Ecto.Query.Builder.Filter do """ @spec escape(:where | :having | :on, Macro.t, non_neg_integer, Keyword.t, Macro.Env.t) :: {Macro.t, {list, list}} def escape(_kind, [], _binding, _vars, _env) do - {true, {[], []}} + {true, {[], %{subqueries: []}}} end def escape(kind, expr, binding, vars, env) when is_list(expr) do - {parts, params_subqueries} = - Enum.map_reduce(expr, {[], []}, fn - {field, nil}, _params_subqueries -> + {parts, params_acc} = + Enum.map_reduce(expr, {[], %{subqueries: []}}, fn + {field, nil}, _params_acc -> Builder.error! "nil given for `#{field}`. Comparison with nil is forbidden as it is unsafe. 
" <> "Instead write a query with is_nil/1, for example: is_nil(s.#{field})" - {field, value}, params_subqueries when is_atom(field) -> + {field, value}, params_acc when is_atom(field) -> value = check_for_nils(value, field) - {value, params_subqueries} = Builder.escape(value, {binding, field}, params_subqueries, vars, env) - {{:{}, [], [:==, [], [to_escaped_field(binding, field), value]]}, params_subqueries} + {value, params_acc} = Builder.escape(value, {binding, field}, params_acc, vars, env) + {{:{}, [], [:==, [], [to_escaped_field(binding, field), value]]}, params_acc} - _, _params_subqueries -> + _, _params_acc -> Builder.error! "expected a keyword list at compile time in #{kind}, " <> "got: `#{Macro.to_string expr}`. If you would like to " <> "pass a list dynamically, please interpolate the whole list with ^" end) expr = Enum.reduce parts, &{:{}, [], [:and, [], [&2, &1]]} - {expr, params_subqueries} + {expr, params_acc} end def escape(_kind, expr, _binding, vars, env) do - Builder.escape(expr, :boolean, {[], []}, vars, env) + Builder.escape(expr, :boolean, {[], %{subqueries: []}}, vars, env) end @doc """ @@ -64,10 +64,10 @@ defmodule Ecto.Query.Builder.Filter do def build(kind, op, query, binding, expr, env) do {query, binding} = Builder.escape_binding(query, binding, env) - {expr, {params, subqueries}} = escape(kind, expr, 0, binding, env) + {expr, {params, acc}} = escape(kind, expr, 0, binding, env) params = Builder.escape_params(params) - subqueries = Enum.reverse(subqueries) + subqueries = Enum.reverse(acc.subqueries) expr = quote do: %Ecto.Query.BooleanExpr{ expr: unquote(expr), diff --git a/.deps/ecto/lib/ecto/query/builder/from.ex b/.deps/ecto/lib/ecto/query/builder/from.ex @@ -35,13 +35,22 @@ defmodule Ecto.Query.Builder.From do """ @spec escape(Macro.t(), Macro.Env.t()) :: {Macro.t(), Keyword.t()} def escape({:in, _, [var, query]}, env) do + query = escape_source(query, env) Builder.escape_binding(query, List.wrap(var), env) end - def escape(query, _env) do + def escape(query, env) do + query = escape_source(query, env) {query, []} end + defp escape_source({:fragment, _, _} = fragment, env) do + {fragment, {params, _acc}} = Builder.escape(fragment, :any, {[], %{}}, [], env) + {fragment, Builder.escape_params(params)} + end + + defp escape_source(query, _env), do: query + @doc """ Builds a quoted expression. 
@@ -81,29 +90,34 @@ defmodule Ecto.Query.Builder.From do # dependencies between modules are added source = quote(do: unquote(schema).__schema__(:source)) {:ok, prefix} = prefix || {:ok, quote(do: unquote(schema).__schema__(:prefix))} - {query(prefix, source, schema, as, hints), binds, 1} + {query(prefix, {source, schema}, [], as, hints, env.file, env.line), binds, 1} source when is_binary(source) -> {:ok, prefix} = prefix || {:ok, nil} # When a binary is used, there is no schema - {query(prefix, source, nil, as, hints), binds, 1} + {query(prefix, {source, nil}, [], as, hints, env.file, env.line), binds, 1} {source, schema} when is_binary(source) and is_atom(schema) -> {:ok, prefix} = prefix || {:ok, quote(do: unquote(schema).__schema__(:prefix))} - {query(prefix, source, schema, as, hints), binds, 1} + {query(prefix, {source, schema}, [], as, hints, env.file, env.line), binds, 1} + + {{:{}, _, [:fragment, _, _]} = fragment, params} -> + {:ok, prefix} = prefix || {:ok, nil} + {query(prefix, fragment, params, as, hints, env.file, env.line), binds, 1} _other -> - quoted = quote do - Ecto.Query.Builder.From.apply(unquote(query), unquote(length(binds)), unquote(as), unquote(prefix), unquote(hints)) - end + quoted = + quote do + Ecto.Query.Builder.From.apply(unquote(query), unquote(length(binds)), unquote(as), unquote(prefix), unquote(hints)) + end {quoted, binds, nil} end end - defp query(prefix, source, schema, as, hints) do + defp query(prefix, source, params, as, hints, file, line) do aliases = if as, do: [{as, 0}], else: [] - from_fields = [source: {source, schema}, as: as, prefix: prefix, hints: hints] + from_fields = [source: source, params: params, as: as, prefix: prefix, hints: hints, file: file, line: line] query_fields = [ from: {:%, [], [Ecto.Query.FromExpr, {:%{}, [], from_fields}]}, diff --git a/.deps/ecto/lib/ecto/query/builder/group_by.ex b/.deps/ecto/lib/ecto/query/builder/group_by.ex @@ -10,10 +10,10 @@ defmodule Ecto.Query.Builder.GroupBy do See `Ecto.Builder.escape/2`. 
- iex> escape(:group_by, quote do [x.x, 13] end, {[], :acc}, [x: 0], __ENV__) + iex> escape(:group_by, quote do [x.x, 13] end, {[], %{}}, [x: 0], __ENV__) {[{:{}, [], [{:{}, [], [:., [], [{:{}, [], [:&, [], [0]]}, :x]]}, [], []]}, 13], - {[], :acc}} + {[], %{}}} """ @spec escape(:group_by | :partition_by, Macro.t, {list, term}, Keyword.t, Macro.Env.t) :: {Macro.t, {list, term}} @@ -93,7 +93,7 @@ defmodule Ecto.Query.Builder.GroupBy do def build(query, binding, expr, env) do {query, binding} = Builder.escape_binding(query, binding, env) - {expr, {params, _}} = escape(:group_by, expr, {[], :acc}, binding, env) + {expr, {params, _acc}} = escape(:group_by, expr, {[], %{}}, binding, env) params = Builder.escape_params(params) group_by = quote do: %Ecto.Query.QueryExpr{ diff --git a/.deps/ecto/lib/ecto/query/builder/join.ex b/.deps/ecto/lib/ecto/query/builder/join.ex @@ -58,7 +58,7 @@ defmodule Ecto.Query.Builder.Join do end def escape({:fragment, _, [_ | _]} = expr, vars, env) do - {expr, {params, :acc}} = Builder.escape(expr, :any, {[], :acc}, vars, env) + {expr, {params, _acc}} = Builder.escape(expr, :any, {[], %{}}, vars, env) {:_, expr, nil, params} end @@ -206,7 +206,10 @@ defmodule Ecto.Query.Builder.Join do def build_on(on, join, as, query, binding, count_bind, env) do case Ecto.Query.Builder.Filter.escape(:on, on, count_bind, binding, env) do - {on_expr, {on_params, []}} -> + {_on_expr, {_on_params, %{subqueries: [_ | _]}}} -> + raise ArgumentError, "invalid expression for join `:on`, subqueries aren't supported" + + {on_expr, {on_params, _acc}} -> on_params = Builder.escape_params(on_params) join = @@ -223,9 +226,6 @@ defmodule Ecto.Query.Builder.Join do end Builder.apply_query(query, __MODULE__, [join, as, count_bind], env) - - _pattern -> - raise ArgumentError, "invalid expression for join `:on`, subqueries aren't supported" end end diff --git a/.deps/ecto/lib/ecto/query/builder/limit_offset.ex b/.deps/ecto/lib/ecto/query/builder/limit_offset.ex @@ -15,7 +15,7 @@ defmodule Ecto.Query.Builder.LimitOffset do @spec build(:limit | :offset, Macro.t, [Macro.t], Macro.t, Macro.Env.t) :: Macro.t def build(type, query, binding, expr, env) do {query, binding} = Builder.escape_binding(query, binding, env) - {expr, {params, :acc}} = Builder.escape(expr, :integer, {[], :acc}, binding, env) + {expr, {params, _acc}} = Builder.escape(expr, :integer, {[], %{}}, binding, env) params = Builder.escape_params(params) if contains_variable?(expr) do diff --git a/.deps/ecto/lib/ecto/query/builder/lock.ex b/.deps/ecto/lib/ecto/query/builder/lock.ex @@ -16,7 +16,7 @@ defmodule Ecto.Query.Builder.Lock do def escape(lock, _vars, _env) when is_binary(lock), do: lock def escape({:fragment, _, [_ | _]} = expr, vars, env) do - {expr, {params, :acc}} = Builder.escape(expr, :any, {[], :acc}, vars, env) + {expr, {params, _acc}} = Builder.escape(expr, :any, {[], %{}}, vars, env) if params != [] do Builder.error!("value interpolation is not allowed in :lock") diff --git a/.deps/ecto/lib/ecto/query/builder/order_by.ex b/.deps/ecto/lib/ecto/query/builder/order_by.ex @@ -41,10 +41,10 @@ defmodule Ecto.Query.Builder.OrderBy do ## Examples - iex> escape(:order_by, quote do [x.x, desc: 13] end, {[], :acc}, [x: 0], __ENV__) + iex> escape(:order_by, quote do [x.x, desc: 13] end, {[], %{}}, [x: 0], __ENV__) {[asc: {:{}, [], [{:{}, [], [:., [], [{:{}, [], [:&, [], [0]]}, :x]]}, [], []]}, desc: 13], - {[], :acc}} + {[], %{}}} """ @spec escape(:order_by | :distinct, Macro.t, {list, term}, Keyword.t, Macro.Env.t) :: @@ -185,7 +185,7 
@@ defmodule Ecto.Query.Builder.OrderBy do def build(query, binding, expr, env) do {query, binding} = Builder.escape_binding(query, binding, env) - {expr, {params, _}} = escape(:order_by, expr, {[], :acc}, binding, env) + {expr, {params, _acc}} = escape(:order_by, expr, {[], %{}}, binding, env) params = Builder.escape_params(params) order_by = quote do: %Ecto.Query.QueryExpr{ diff --git a/.deps/ecto/lib/ecto/query/builder/select.ex b/.deps/ecto/lib/ecto/query/builder/select.ex @@ -14,16 +14,16 @@ defmodule Ecto.Query.Builder.Select do ## Examples iex> escape({1, 2}, [], __ENV__) - {{:{}, [], [:{}, [], [1, 2]]}, {[], %{}}} + {{:{}, [], [:{}, [], [1, 2]]}, {[], %{take: %{}, subqueries: [], aliases: %{}}}} iex> escape([1, 2], [], __ENV__) - {[1, 2], {[], %{}}} + {[1, 2], {[], %{take: %{}, subqueries: [], aliases: %{}}}} iex> escape(quote(do: x), [x: 0], __ENV__) - {{:{}, [], [:&, [], [0]]}, {[], %{}}} + {{:{}, [], [:&, [], [0]]}, {[], %{take: %{}, subqueries: [], aliases: %{}}}} """ - @spec escape(Macro.t, Keyword.t, Macro.Env.t) :: {Macro.t, {list, %{}}} + @spec escape(Macro.t, Keyword.t, Macro.Env.t) :: {Macro.t, {list, %{take: map, subqueries: list}}} def escape(atom, _vars, _env) when is_atom(atom) and not is_boolean(atom) and atom != nil do Builder.error! """ @@ -34,7 +34,10 @@ defmodule Ecto.Query.Builder.Select do def escape(other, vars, env) do cond do take?(other) -> - {{:{}, [], [:&, [], [0]]}, {[], %{0 => {:any, Macro.expand(other, env)}}}} + { + {:{}, [], [:&, [], [0]]}, + {[], %{take: %{0 => {:any, Macro.expand(other, env)}}, subqueries: [], aliases: %{}}} + } maybe_take?(other) -> Builder.error! """ @@ -42,91 +45,106 @@ defmodule Ecto.Query.Builder.Select do Instead interpolate all fields at once, such as: `select: ^[:foo, :bar, :baz]`. \ Got: #{Macro.to_string(other)}. 
""" - + true -> - escape(other, {[], %{}}, vars, env) + {expr, {params, acc}} = escape(other, {[], %{take: %{}, subqueries: [], aliases: %{}}}, vars, env) + acc = %{acc | subqueries: Enum.reverse(acc.subqueries)} + {expr, {params, acc}} end end # Tuple - defp escape({left, right}, params_take, vars, env) do - escape({:{}, [], [left, right]}, params_take, vars, env) + defp escape({left, right}, params_acc, vars, env) do + escape({:{}, [], [left, right]}, params_acc, vars, env) end # Tuple - defp escape({:{}, _, list}, params_take, vars, env) do - {list, params_take} = Enum.map_reduce(list, params_take, &escape(&1, &2, vars, env)) + defp escape({:{}, _, list}, params_acc, vars, env) do + {list, params_acc} = Enum.map_reduce(list, params_acc, &escape(&1, &2, vars, env)) expr = {:{}, [], [:{}, [], list]} - {expr, params_take} + {expr, params_acc} end # Struct - defp escape({:%, _, [name, map]}, params_take, vars, env) do + defp escape({:%, _, [name, map]}, params_acc, vars, env) do name = Macro.expand(name, env) - {escaped_map, params_take} = escape(map, params_take, vars, env) - {{:{}, [], [:%, [], [name, escaped_map]]}, params_take} + {escaped_map, params_acc} = escape(map, params_acc, vars, env) + {{:{}, [], [:%, [], [name, escaped_map]]}, params_acc} end # Map - defp escape({:%{}, _, [{:|, _, [data, pairs]}]}, params_take, vars, env) do - {data, params_take} = escape(data, params_take, vars, env) - {pairs, params_take} = escape_pairs(pairs, params_take, vars, env) - {{:{}, [], [:%{}, [], [{:{}, [], [:|, [], [data, pairs]]}]]}, params_take} + defp escape({:%{}, _, [{:|, _, [data, pairs]}]}, params_acc, vars, env) do + {data, params_acc} = escape(data, params_acc, vars, env) + {pairs, params_acc} = escape_pairs(pairs, params_acc, vars, env) + {{:{}, [], [:%{}, [], [{:{}, [], [:|, [], [data, pairs]]}]]}, params_acc} end # Merge - defp escape({:merge, _, [left, {kind, _, _} = right]}, params_take, vars, env) + defp escape({:merge, _, [left, {kind, _, _} = right]}, params_acc, vars, env) when kind in [:%{}, :map] do - {left, params_take} = escape(left, params_take, vars, env) - {right, params_take} = escape(right, params_take, vars, env) - {{:{}, [], [:merge, [], [left, right]]}, params_take} + {left, params_acc} = escape(left, params_acc, vars, env) + {right, params_acc} = escape(right, params_acc, vars, env) + {{:{}, [], [:merge, [], [left, right]]}, params_acc} end - defp escape({:merge, _, [_left, right]}, _params_take, _vars, _env) do + defp escape({:merge, _, [_left, right]}, _params_acc, _vars, _env) do Builder.error! 
"expected the second argument of merge/2 in select to be a map, got: `#{Macro.to_string(right)}`" end # Map - defp escape({:%{}, _, pairs}, params_take, vars, env) do - {pairs, params_take} = escape_pairs(pairs, params_take, vars, env) - {{:{}, [], [:%{}, [], pairs]}, params_take} + defp escape({:%{}, _, pairs}, params_acc, vars, env) do + {pairs, params_acc} = escape_pairs(pairs, params_acc, vars, env) + {{:{}, [], [:%{}, [], pairs]}, params_acc} end # List - defp escape(list, params_take, vars, env) when is_list(list) do - Enum.map_reduce(list, params_take, &escape(&1, &2, vars, env)) + defp escape(list, params_acc, vars, env) when is_list(list) do + Enum.map_reduce(list, params_acc, &escape(&1, &2, vars, env)) end # map/struct(var, [:foo, :bar]) - defp escape({tag, _, [{var, _, context}, fields]}, {params, take}, vars, env) + defp escape({tag, _, [{var, _, context}, fields]}, {params, acc}, vars, env) when tag in [:map, :struct] and is_atom(var) and is_atom(context) do taken = escape_fields(fields, tag, env) expr = Builder.escape_var!(var, vars) - take = add_take(take, Builder.find_var!(var, vars), {tag, taken}) - {expr, {params, take}} + acc = add_take(acc, Builder.find_var!(var, vars), {tag, taken}) + {expr, {params, acc}} + end + + # aliased values + defp escape({:selected_as, _, [expr, name]}, {params, acc}, vars, env) when is_atom(name) do + {escaped, {params, acc}} = Builder.escape(expr, :any, {params, acc}, vars, env) + expr = {:{}, [], [:selected_as, [], [escaped, name]]} + aliases = Builder.add_select_alias(acc.aliases, name) + {expr, {params, %{acc | aliases: aliases}}} + end + + defp escape({:selected_as, _, [_expr, name]}, {_params, _acc}, _vars, _env) do + Builder.error! "selected_as/2 expects `name` to be an atom, got `#{inspect(name)}`" end - defp escape(expr, params_take, vars, env) do - Builder.escape(expr, :any, params_take, vars, {env, &escape_expansion/5}) + defp escape(expr, params_acc, vars, env) do + Builder.escape(expr, :any, params_acc, vars, {env, &escape_expansion/5}) end - defp escape_expansion(expr, _type, params_take, vars, env) do - escape(expr, params_take, vars, env) + defp escape_expansion(expr, _type, params_acc, vars, env) do + escape(expr, params_acc, vars, env) end - defp escape_pairs(pairs, params_take, vars, env) do - Enum.map_reduce pairs, params_take, fn({k, v}, acc) -> + defp escape_pairs(pairs, params_acc, vars, env) do + Enum.map_reduce(pairs, params_acc, fn {k, v}, acc -> {k, acc} = escape_key(k, acc, vars, env) {v, acc} = escape(v, acc, vars, env) {{k, v}, acc} - end + end) end - defp escape_key(k, params_take, _vars, _env) when is_atom(k) do - {k, params_take} + defp escape_key(k, params_acc, _vars, _env) when is_atom(k) do + {k, params_acc} end - defp escape_key(k, params_take, vars, env) do - escape(k, params_take, vars, env) + + defp escape_key(k, params_acc, vars, env) do + escape(k, params_acc, vars, env) end defp escape_fields({:^, _, [interpolated]}, tag, _env) do @@ -139,8 +157,11 @@ defmodule Ecto.Query.Builder.Select do fields when is_list(fields) -> fields _ -> - Builder.error! "`#{tag}/2` in `select` expects either a literal or " <> - "an interpolated list of atom fields" + Builder.error!( + "`#{tag}/2` in `select` expects either a literal or " <> + "an interpolated (1) list of atom fields, (2) dynamic, or " <> + "(3) map with dynamic values" + ) end end @@ -180,16 +201,80 @@ defmodule Ecto.Query.Builder.Select do @doc """ Called at runtime for interpolated/dynamic selects. 
""" + def select!(kind, query, fields, file, line) when is_map(fields) do + {expr, {params, subqueries, aliases, _count}} = expand_nested(fields, {[], [], %{}, 0}, query) + + %Ecto.Query.SelectExpr{ + expr: expr, + params: Enum.reverse(params), + subqueries: Enum.reverse(subqueries), + aliases: aliases, + file: file, + line: line + } + |> apply_or_merge(kind, query) + end + def select!(kind, query, fields, file, line) do take = %{0 => {:any, fields!(:select, fields)}} - expr = %Ecto.Query.SelectExpr{expr: {:&, [], [0]}, take: take, file: file, line: line} + + %Ecto.Query.SelectExpr{expr: {:&, [], [0]}, take: take, file: file, line: line} + |> apply_or_merge(kind, query) + end + + defp apply_or_merge(select, kind, query) do if kind == :select do - apply(query, expr) + apply(query, select) else - merge(query, expr) + merge(query, select) end end + defp expand_nested(%Ecto.Query.DynamicExpr{} = dynamic, {params, subqueries, aliases, count}, query) do + {expr, params, subqueries, aliases, count} = + Ecto.Query.Builder.Dynamic.partially_expand(query, dynamic, params, subqueries, aliases, count) + + {expr, {params, subqueries, aliases, count}} + end + + defp expand_nested(%Ecto.SubQuery{} = subquery, {params, subqueries, aliases, count}, _query) do + index = length(subqueries) + # used both in ast and in parameters, as a placeholder. + expr = {:subquery, index} + params = [expr | params] + subqueries = [subquery | subqueries] + count = count + 1 + + {expr, {params, subqueries, aliases, count}} + end + + defp expand_nested(%type{} = fields, acc, query) do + {fields, acc} = fields |> Map.from_struct() |> expand_nested(acc, query) + {{:%, [], [type, fields]}, acc} + end + + defp expand_nested(fields, acc, query) when is_map(fields) do + {fields, acc} = fields |> Enum.map_reduce(acc, &expand_nested_pair(&1, &2, query)) + {{:%{}, [], fields}, acc} + end + + defp expand_nested(invalid, _acc, query) when is_list(invalid) or is_tuple(invalid) do + raise Ecto.QueryError, + query: query, + message: + "Interpolated map values in :select can only be " <> + "maps, structs, dynamics, subqueries and literals. Got #{inspect(invalid)}" + end + + defp expand_nested(other, acc, _query) do + {other, acc} + end + + defp expand_nested_pair({key, val}, acc, query) do + {val, acc} = expand_nested(val, acc, query) + {{key, val}, acc} + end + @doc """ Builds a quoted expression. @@ -208,16 +293,19 @@ defmodule Ecto.Query.Builder.Select do def build(kind, query, binding, expr, env) do {query, binding} = Builder.escape_binding(query, binding, env) - {expr, {params, take}} = escape(expr, binding, env) + {expr, {params, acc}} = escape(expr, binding, env) params = Builder.escape_params(params) - take = {:%{}, [], Map.to_list(take)} + take = {:%{}, [], Map.to_list(acc.take)} + aliases = {:%{}, [], Map.to_list(acc.aliases)} select = quote do: %Ecto.Query.SelectExpr{ expr: unquote(expr), params: unquote(params), file: unquote(env.file), line: unquote(env.line), - take: unquote(take)} + take: unquote(take), + subqueries: unquote(acc.subqueries), + aliases: unquote(aliases)} if kind == :select do Builder.apply_query(query, __MODULE__, [select], env) @@ -247,19 +335,23 @@ defmodule Ecto.Query.Builder.Select do The callback applied by `build/5` when merging. 
""" def merge(%Ecto.Query{select: nil} = query, new_select) do - merge(query, new_select, {:&, [], [0]}, [], %{}, new_select) + merge(query, new_select, {:&, [], [0]}, [], [], %{}, %{}, new_select) end def merge(%Ecto.Query{select: old_select} = query, new_select) do - %{expr: old_expr, params: old_params, take: old_take} = old_select - merge(query, old_select, old_expr, old_params, old_take, new_select) + %{expr: old_expr, params: old_params, subqueries: old_subqueries, take: old_take, aliases: old_aliases} = old_select + merge(query, old_select, old_expr, old_params, old_subqueries, old_take, old_aliases, new_select) end def merge(query, expr) do merge(Ecto.Queryable.to_query(query), expr) end - defp merge(query, select, old_expr, old_params, old_take, new_select) do - %{expr: new_expr, params: new_params, take: new_take} = new_select - new_expr = Ecto.Query.Builder.bump_interpolations(new_expr, old_params) + defp merge(query, select, old_expr, old_params, old_subqueries, old_take, old_aliases, new_select) do + %{expr: new_expr, params: new_params, subqueries: new_subqueries, take: new_take, aliases: new_aliases} = new_select + + new_expr = + new_expr + |> Ecto.Query.Builder.bump_interpolations(old_params) + |> Ecto.Query.Builder.bump_subqueries(old_subqueries) expr = case {classify_merge(old_expr, old_take), classify_merge(new_expr, new_take)} do @@ -318,8 +410,10 @@ defmodule Ecto.Query.Builder.Select do select = %{ select | expr: expr, - params: old_params ++ new_params, - take: merge_take(old_expr, old_take, new_take) + params: old_params ++ bump_subquery_params(new_params, old_subqueries), + subqueries: old_subqueries ++ new_subqueries, + take: merge_take(query.from.source, old_expr, old_take, new_take), + aliases: merge_aliases(old_aliases, new_aliases) } %{query | select: select} @@ -362,21 +456,39 @@ defmodule Ecto.Query.Builder.Select do Macro.to_string(other) end - defp add_take(take, key, value) do - Map.update(take, key, value, &merge_take_kind_and_fields(key, &1, value)) + defp add_take(acc, key, value) do + take = Map.update(acc.take, key, value, &merge_take_kind_and_fields(key, &1, value)) + %{acc | take: take} end - defp merge_take(old_expr, %{} = old_take, %{} = new_take) do - Enum.reduce(new_take, old_take, fn {binding, new_value}, acc -> + defp bump_subquery_params(new_params, old_subqueries) do + len = length(old_subqueries) + + Enum.map(new_params, fn + {:subquery, counter} -> {:subquery, len + counter} + other -> other + end) + end + + defp merge_take(source, old_expr, %{} = old_take, %{} = new_take) do + Enum.reduce(new_take, old_take, fn {binding, {new_kind, new_fields} = new_value}, acc -> case acc do %{^binding => old_value} -> Map.put(acc, binding, merge_take_kind_and_fields(binding, old_value, new_value)) %{} -> - # If the binding is a not filtered source, merge shouldn't restrict it - case old_expr do - {:&, _, [^binding]} -> acc - _ -> Map.put(acc, binding, new_value) + # If merging with a schema, add the schema's query fields. This comes in handy if the user + # is merging fields with load_in_query = false. + # If merging with a schemaless source, do nothing so the planner can take all the fields. + case {old_expr, source} do + {{:&, _, [^binding]}, {_source, schema}} when not is_nil(schema) -> + Map.put(acc, binding, {new_kind, Enum.uniq(new_fields ++ schema.__schema__(:query_fields))}) + + {{:&, _, [^binding]}, _} -> + acc + + _ -> + Map.put(acc, binding, new_value) end end end) @@ -393,4 +505,10 @@ defmodule Ecto.Query.Builder.Select do Builder.error! 
"cannot select_merge because the binding at position #{binding} " <> "was previously specified as a `#{old}` and later as `#{new}`" end + + defp merge_aliases(old_aliases, new_aliases) do + Enum.reduce(new_aliases, old_aliases, fn {alias, _}, aliases -> + Builder.add_select_alias(aliases, alias) + end) + end end diff --git a/.deps/ecto/lib/ecto/query/builder/update.ex b/.deps/ecto/lib/ecto/query/builder/update.ex @@ -67,7 +67,7 @@ defmodule Ecto.Query.Builder.Update do {compile, [{k, v} | runtime], params} {k, v}, {compile, runtime, params} -> k = escape_field!(k) - {v, {params, :acc}} = Builder.escape(v, type_for_key(op, {0, k}), {params, :acc}, vars, env) + {v, {params, _acc}} = Builder.escape(v, type_for_key(op, {0, k}), {params, %{}}, vars, env) {[{k, v} | compile], runtime, params} _, _acc -> Builder.error! "malformed #{inspect op} in update `#{Macro.to_string(kw)}`, " <> diff --git a/.deps/ecto/lib/ecto/query/builder/windows.ex b/.deps/ecto/lib/ecto/query/builder/windows.ex @@ -12,8 +12,8 @@ defmodule Ecto.Query.Builder.Windows do ## Examples - iex> escape(quote do [order_by: [desc: 13]] end, {[], :acc}, [x: 0], __ENV__) - {[order_by: [desc: 13]], [], {[], :acc}} + iex> escape(quote do [order_by: [desc: 13]] end, {[], %{}}, [x: 0], __ENV__) + {[order_by: [desc: 13]], [], {[], %{}}} """ @spec escape([Macro.t], {list, term}, Keyword.t, Macro.Env.t | {Macro.Env.t, fun}) @@ -125,7 +125,7 @@ defmodule Ecto.Query.Builder.Windows do end defp escape_window(vars, {name, expr}, env) do - {compile_acc, runtime_acc, {params, _}} = escape(expr, {[], :acc}, vars, env) + {compile_acc, runtime_acc, {params, _acc}} = escape(expr, {[], %{}}, vars, env) {name, compile_acc, runtime_acc, Builder.escape_params(params)} end diff --git a/.deps/ecto/lib/ecto/query/inspect.ex b/.deps/ecto/lib/ecto/query/inspect.ex @@ -89,7 +89,7 @@ defimpl Inspect, for: Ecto.Query do |> generate_names() |> List.to_tuple() - from = bound_from(query.from, elem(names, 0)) + from = bound_from(query.from, elem(names, 0), names) joins = joins(query.joins, names) preloads = preloads(query.preloads) assocs = assocs(query.assocs, names) @@ -128,19 +128,20 @@ defimpl Inspect, for: Ecto.Query do ]) end - defp bound_from(nil, name), do: ["from #{name} in query"] + defp bound_from(nil, name, _names), do: ["from #{name} in query"] - defp bound_from(%{source: source} = from, name) do - ["from #{name} in #{inspect_source(source)}"] ++ kw_as_and_prefix(from) + defp bound_from(from, name, names) do + ["from #{name} in #{inspect_source(from, names)}"] ++ kw_as_and_prefix(from) end - defp inspect_source(%Ecto.Query{} = query), do: "^" <> inspect(query) - defp inspect_source(%Ecto.SubQuery{query: query}), do: "subquery(#{to_string(query)})" - defp inspect_source({source, nil}), do: inspect(source) - defp inspect_source({nil, schema}), do: inspect(schema) + defp inspect_source(%{source: %Ecto.Query{} = query}, _names), do: "^" <> inspect(query) + defp inspect_source(%{source: %Ecto.SubQuery{query: query}}, _names), do: "subquery(#{to_string(query)})" + defp inspect_source(%{source: {source, nil}}, _names), do: inspect(source) + defp inspect_source(%{source: {nil, schema}}, _names), do: inspect(schema) + defp inspect_source(%{source: {:fragment, _, _} = source} = part, names), do: "#{expr(source, names, part)}" - defp inspect_source({source, schema} = from) do - inspect(if source == schema.__schema__(:source), do: schema, else: from) + defp inspect_source(%{source: {source, schema}}, _names) do + inspect(if source == schema.__schema__(:source), do: 
schema, else: {source, schema}) end defp joins(joins, names) do @@ -154,17 +155,8 @@ defimpl Inspect, for: Ecto.Query do [{join_qual(qual), string}] ++ kw_as_and_prefix(join) ++ maybe_on(on, names) end - defp join( - %JoinExpr{qual: qual, source: {:fragment, _, _} = source, on: on} = join = part, - name, - names - ) do - string = "#{name} in #{expr(source, names, part)}" - [{join_qual(qual), string}] ++ kw_as_and_prefix(join) ++ [on: expr(on, names)] - end - - defp join(%JoinExpr{qual: qual, source: source, on: on} = join, name, names) do - string = "#{name} in #{inspect_source(source)}" + defp join(%JoinExpr{qual: qual, on: on} = join, name, names) do + string = "#{name} in #{inspect_source(join, names)}" [{join_qual(qual), string}] ++ kw_as_and_prefix(join) ++ [on: expr(on, names)] end @@ -373,6 +365,7 @@ defimpl Inspect, for: Ecto.Query do defp from_sources(%Ecto.SubQuery{query: query}), do: from_sources(query.from.source) defp from_sources({source, schema}), do: schema || source defp from_sources(nil), do: "query" + defp from_sources({:fragment, _, _}), do: "fragment" defp join_sources(joins) do joins diff --git a/.deps/ecto/lib/ecto/query/planner.ex b/.deps/ecto/lib/ecto/query/planner.ex @@ -119,20 +119,21 @@ defmodule Ecto.Query.Planner do """ def query(query, operation, cache, adapter, counter) do {query, params, key} = plan(query, operation, adapter) - query_with_cache(key, query, operation, cache, adapter, counter, params) + {cast_params, dump_params} = Enum.unzip(params) + query_with_cache(key, query, operation, cache, adapter, counter, cast_params, dump_params) end - defp query_with_cache(key, query, operation, cache, adapter, counter, params) do + defp query_with_cache(key, query, operation, cache, adapter, counter, cast_params, dump_params) do case query_lookup(key, query, operation, cache, adapter, counter) do {_, select, prepared} -> - {build_meta(query, select), {:nocache, prepared}, params} + {build_meta(query, select), {:nocache, prepared}, cast_params, dump_params} {_key, :cached, select, cached} -> update = &cache_update(cache, key, &1) reset = &cache_reset(cache, key, &1) - {build_meta(query, select), {:cached, update, reset, cached}, params} + {build_meta(query, select), {:cached, update, reset, cached}, cast_params, dump_params} {_key, :cache, select, prepared} -> update = &cache_update(cache, key, &1) - {build_meta(query, select), {:cache, update, prepared}, params} + {build_meta(query, select), {:cache, update, prepared}, cast_params, dump_params} end end @@ -204,6 +205,7 @@ defmodule Ecto.Query.Planner do |> plan_combinations(adapter) |> plan_ctes(adapter) |> plan_wheres(adapter) + |> plan_select(adapter) |> plan_cache(operation, adapter) rescue e -> @@ -232,6 +234,11 @@ defmodule Ecto.Query.Planner do error!(query, "query must have a from expression") end + defp plan_from(%{from: %{source: {:fragment, _, _}}, preloads: preloads, assocs: assocs} = query, _adapter) + when assocs != [] or preloads != [] do + error!(query, "cannot preload associations with a fragment source") + end + defp plan_from(%{from: from} = query, adapter) do plan_source(query, from, adapter) end @@ -256,7 +263,7 @@ defmodule Ecto.Query.Planner do do: {expr, source} defp plan_source(query, %{source: {:fragment, _, _}, prefix: prefix} = expr, _adapter), - do: error!(query, expr, "cannot set prefix: #{inspect(prefix)} option for fragment joins") + do: error!(query, expr, "cannot set prefix: #{inspect(prefix)} option for fragment sources") defp plan_subquery(subquery, query, prefix, adapter, 
source?) do %{query: inner_query} = subquery @@ -300,14 +307,14 @@ defmodule Ecto.Query.Planner do defp normalize_subquery_select(query, adapter, source?) do {schema_or_source, expr, %{select: select} = query} = rewrite_subquery_select_expr(query, source?) {expr, _} = prewalk(expr, :select, query, select, 0, adapter) - {{:map, types}, _fields, _from} = collect_fields(expr, [], :never, query, select.take, true) + {{:map, types}, _fields, _from} = collect_fields(expr, [], :never, query, select.take, true, %{}) {query, subquery_source(schema_or_source, types)} end defp subquery_source(nil, types), do: {:map, types} defp subquery_source(name, types) when is_atom(name), do: {:struct, name, types} defp subquery_source({:source, schema, prefix, types}, only) do - types = Enum.map(only, fn {field, _} -> {field, Keyword.get(types, field, :any)} end) + types = Enum.map(only, fn {field, {:value, type}} -> {field, Keyword.get(types, field, type)} end) {:source, schema, prefix, types} end @@ -339,7 +346,8 @@ defmodule Ecto.Query.Planner do end defp subquery_select({:%{}, _, [{:|, _, [{:&, [], [ix]}, pairs]}]} = expr, take, query) do assert_subquery_fields!(query, expr, pairs) - {source, _} = source_take!(:select, query, take, ix, ix) + drop = Map.new(pairs, fn {key, _} -> {key, nil} end) + {source, _} = source_take!(:select, query, take, ix, ix, drop) # In case of map updates, we need to remove duplicated fields # at query time because we use the field names as aliases and @@ -352,7 +360,7 @@ defmodule Ecto.Query.Planner do {nil, pairs} end defp subquery_select({:&, _, [ix]}, take, query) do - {source, _} = source_take!(:select, query, take, ix, ix) + {source, _} = source_take!(:select, query, take, ix, ix, %{}) fields = subquery_source_fields(source) {keep_source_or_struct(source), subquery_fields(fields, ix)} end @@ -398,7 +406,7 @@ defmodule Ecto.Query.Planner do if valid_subquery_value?(value) do {key, value} else - error!(query, "atoms, maps, lists, tuples and sources are not allowed as map values in subquery, got: `#{Macro.to_string(expr)}`") + error!(query, "atoms, structs, maps, lists, tuples and sources are not allowed as map values in subquery, got: `#{Macro.to_string(expr)}`") end end) end @@ -406,7 +414,7 @@ defmodule Ecto.Query.Planner do defp valid_subquery_value?({_, _}), do: false defp valid_subquery_value?(args) when is_list(args), do: false defp valid_subquery_value?({container, _, args}) - when container in [:{}, :%{}, :&] and is_list(args), do: false + when container in [:{}, :%{}, :&, :%] and is_list(args), do: false defp valid_subquery_value?(nil), do: true defp valid_subquery_value?(arg) when is_atom(arg), do: is_boolean(arg) defp valid_subquery_value?(_), do: true @@ -603,6 +611,17 @@ defmodule Ecto.Query.Planner do %{query | wheres: wheres} end + @spec plan_select(Ecto.Query.t, module) :: Ecto.Query.t + defp plan_select(query, adapter) do + case query do + %{select: %{subqueries: [_ | _] = subqueries}} -> + subqueries = Enum.map(subqueries, &plan_subquery(&1, query, nil, adapter, false)) + put_in(query.select.subqueries, subqueries) + + query -> query + end + end + @doc """ Prepare the parameters by merging and casting them according to sources. 
""" @@ -617,9 +636,10 @@ defmodule Ecto.Query.Planner do {query, params, finalize_cache(query, operation, cache)} end - defp merge_cache(:from, _query, from, {cache, params}, _operation, _adapter) do + defp merge_cache(:from, query, from, {cache, params}, _operation, adapter) do {key, params} = source_cache(from, params) - {merge_cache({:from, key, from.hints}, cache, key != :nocache), params} + {params, source_cacheable?} = cast_and_merge_params(:from, query, from, params, adapter) + {merge_cache({:from, key, from.hints}, cache, source_cacheable? and key != :nocache), params} end defp merge_cache(kind, query, expr, {cache, params}, _operation, adapter) @@ -725,12 +745,16 @@ defmodule Ecto.Query.Planner do {v, type}, {acc, cacheable?} -> case cast_param(kind, query, expr, v, type, adapter) do - {:in, v} -> {Enum.reverse(v, acc), false} - v -> {[v | acc], cacheable?} + {cast_v, {:in, dump_v}} -> {split_in_params(cast_v, dump_v, acc), false} + cast_v_and_dump_v -> {[cast_v_and_dump_v | acc], cacheable?} end end end + defp split_in_params(cast_v, dump_v, acc) do + Enum.zip(cast_v, dump_v) |> Enum.reverse(acc) + end + defp merge_cache(_left, _right, false), do: :nocache defp merge_cache(_left, :nocache, true), do: :nocache defp merge_cache(left, right, true), do: [left|right] @@ -775,7 +799,7 @@ defmodule Ecto.Query.Planner do defp cast_param(_kind, query, expr, %DynamicExpr{}, _type, _value) do error! query, expr, "invalid dynamic expression", - "dynamic expressions can only be interpolated at the top level of where, having, group_by, order_by, update or a join's on" + "dynamic expressions can only be interpolated at the top level of where, having, group_by, order_by, select, update or a join's on" end defp cast_param(_kind, query, expr, [{key, _} | _], _type, _value) when is_atom(key) do error! query, expr, "invalid keyword list", @@ -801,8 +825,9 @@ defmodule Ecto.Query.Planner do defp cast_param(kind, type, v, adapter) do with {:ok, type} <- normalize_param(kind, type, v), - {:ok, v} <- cast_param(kind, type, v), - do: dump_param(adapter, type, v) + {:ok, cast_v} <- cast_param(kind, type, v), + {:ok, dump_v} <- dump_param(adapter, type, cast_v), + do: {:ok, {cast_v, dump_v}} end @doc """ @@ -891,6 +916,9 @@ defmodule Ecto.Query.Planner do def ensure_select(%{select: nil, from: %{source: {_, nil}}} = query, true) do error! query, "queries that do not have a schema need to explicitly pass a :select clause" end + def ensure_select(%{select: nil, from: %{source: {:fragment, _, _}}} = query, true) do + error! query, "queries from a fragment need to explicitly pass a :select clause" + end def ensure_select(%{select: nil} = query, true) do %{query | select: %SelectExpr{expr: {:&, [], [0]}, line: __ENV__.line, file: __ENV__.file}} end @@ -909,7 +937,7 @@ defmodule Ecto.Query.Planner do query |> normalize_query(operation, adapter, counter) |> elem(0) - |> normalize_select(keep_literals?(operation, query)) + |> normalize_select(keep_literals?(operation, query), true) rescue e -> # Reraise errors so we ignore the planner inner stacktrace @@ -987,8 +1015,8 @@ defmodule Ecto.Query.Planner do # Now compute the fields as keyword lists so we emit AS in Ecto query. 
%{select: %{expr: expr, take: take}} = inner_query - {{:map, types}, fields, _from} = collect_fields(expr, [], :never, inner_query, take, true) - fields = Enum.zip(Keyword.keys(types), Enum.reverse(fields)) + {{:map, types}, fields, _from} = collect_fields(expr, [], :never, inner_query, take, true, %{}) + fields = cte_fields(Keyword.keys(types), Enum.reverse(fields), []) inner_query = put_in(inner_query.select.fields, fields) {_, inner_query} = pop_in(inner_query.aliases[@parent_as]) @@ -1027,7 +1055,7 @@ defmodule Ecto.Query.Planner do {combinations, counter} = Enum.reduce combinations, {[], counter}, fn {type, combination_query}, {combinations, counter} -> {combination_query, counter} = traverse_exprs(combination_query, operation, counter, fun) - {combination_query, _} = combination_query |> normalize_select(true) + {combination_query, _} = combination_query |> normalize_select(true, true) {[{type, combination_query} | combinations], counter} end @@ -1068,7 +1096,7 @@ defmodule Ecto.Query.Planner do try do inner_query = put_in inner_query.aliases[@parent_as], query {inner_query, counter} = normalize_query(inner_query, :all, adapter, counter) - {inner_query, _} = normalize_select(inner_query, true) + {inner_query, _} = normalize_select(inner_query, true, false) {_, inner_query} = pop_in(inner_query.aliases[@parent_as]) inner_query = @@ -1171,7 +1199,7 @@ defmodule Ecto.Query.Planner do end defp prewalk({:json_extract_path, meta, [json_field, path]}, kind, query, expr, acc, _adapter) do - {{:., _, [{:&, _, [ix]}, field]}, _, []} = json_field + {{:., dot_meta, [{:&, amp_meta, [ix]}, field]}, expr_meta, []} = json_field case type!(kind, query, expr, ix, field) do {:parameterized, Ecto.Embedded, embed} -> @@ -1193,9 +1221,17 @@ defmodule Ecto.Query.Planner do end end + field_source = kind |> get_source!(query, ix) |> field_source(field) + + json_field = {{:., dot_meta, [{:&, amp_meta, [ix]}, field_source]}, expr_meta, []} {{:json_extract_path, meta, [json_field, path]}, acc} end + defp prewalk({:selected_as, [], [name]}, _kind, query, _expr, acc, _adapter) do + name = selected_as!(query.select.aliases, name) + {{:selected_as, [], [name]}, acc} + end + defp prewalk(%Ecto.Query.Tagged{value: v, type: type} = tagged, kind, query, expr, acc, adapter) do if Ecto.Type.base?(type) do {tagged, acc} @@ -1224,6 +1260,17 @@ defmodule Ecto.Query.Planner do {other, acc} end + defp selected_as!(select_aliases, name) do + case select_aliases do + %{^name => _} -> + name + + _ -> + raise ArgumentError, + "invalid alias: `#{inspect(name)}`. Use `selected_as/2` to define aliases in the outer most `select` expression." + end + end + defp dump_param(kind, query, expr, v, type, adapter) do type = field_type!(kind, query, expr, type) @@ -1252,11 +1299,11 @@ defmodule Ecto.Query.Planner do end end - defp normalize_select(%{select: nil} = query, _keep_literals?) do + defp normalize_select(%{select: nil} = query, _keep_literals?, _allow_alias?) do {query, nil} end - defp normalize_select(query, keep_literals?) do + defp normalize_select(query, keep_literals?, allow_alias?) do %{assocs: assocs, preloads: preloads, select: select} = query %{take: take, expr: expr} = select {tag, from_take} = Map.get(take, 0, {:any, []}) @@ -1276,7 +1323,12 @@ defmodule Ecto.Query.Planner do end {postprocess, fields, from} = - collect_fields(expr, [], :none, query, take, keep_literals?) + collect_fields(expr, [], :none, query, take, keep_literals?, %{}) + + # Convert selected_as/2 to a tuple so it can be aliased by the adapters. 
+ # Don't convert if the select expression belongs to a CTE or subquery + # because those fields are already automatically aliased. + fields = normalize_selected_as(fields, allow_alias?, select.aliases) {fields, preprocess, from} = case from do @@ -1304,37 +1356,54 @@ defmodule Ecto.Query.Planner do {put_in(query.select.fields, fields), select} end + defp normalize_selected_as(fields, _allow_alias?, aliases) when aliases == %{}, do: fields + + defp normalize_selected_as(_fields, false, aliases) do + raise ArgumentError, + "`selected_as/2` can only be used in the outer most `select` expression. " <> + "If you are attempting to alias a field from a subquery or cte, it is not allowed " <> + "because the fields are automatically aliased by the corresponding map/struct key. " <> + "The following field aliases were specified: #{inspect(Map.keys(aliases))}." + end + + defp normalize_selected_as(fields, true, _aliases) do + Enum.map(fields, fn + {:selected_as, _, [select_expr, name]} -> {name, select_expr} + field -> field + end) + end + # Handling of source - defp collect_fields({:merge, _, [{:&, _, [0]}, right]}, fields, :none, query, take, keep_literals?) do - {expr, taken} = source_take!(:select, query, take, 0, 0) + defp collect_fields({:merge, _, [{:&, _, [0]}, right]}, fields, :none, query, take, keep_literals?, _drop) do + {expr, taken} = source_take!(:select, query, take, 0, 0, %{}) from = {:ok, {:source, :from}, expr, taken} - {right, right_fields, _from} = collect_fields(right, [], from, query, take, keep_literals?) + {right, right_fields, _from} = collect_fields(right, [], from, query, take, keep_literals?, %{}) from = {:ok, {:merge, {:source, :from}, right}, expr, taken ++ Enum.reverse(right_fields)} {{:source, :from}, fields, from} end - defp collect_fields({:&, _, [0]}, fields, :none, query, take, _keep_literals?) do - {expr, taken} = source_take!(:select, query, take, 0, 0) + defp collect_fields({:&, _, [0]}, fields, :none, query, take, _keep_literals?, drop) do + {expr, taken} = source_take!(:select, query, take, 0, 0, drop) {{:source, :from}, fields, {:ok, {:source, :from}, expr, taken}} end - defp collect_fields({:&, _, [0]}, fields, from, _query, _take, _keep_literals?) + defp collect_fields({:&, _, [0]}, fields, from, _query, _take, _keep_literals?, _drop) when from != :never do {{:source, :from}, fields, from} end - defp collect_fields({:&, _, [ix]}, fields, from, query, take, _keep_literals?) do - {expr, taken} = source_take!(:select, query, take, ix, ix) + defp collect_fields({:&, _, [ix]}, fields, from, query, take, _keep_literals?, drop) do + {expr, taken} = source_take!(:select, query, take, ix, ix, drop) {expr, Enum.reverse(taken, fields), from} end # Expression handling defp collect_fields({agg, _, [{{:., dot_meta, [{:&, _, [_]}, _]}, _, []} | _]} = expr, - fields, from, _query, _take, _keep_literals?) + fields, from, _query, _take, _keep_literals?, _drop) when agg in @aggs do type = case agg do @@ -1352,156 +1421,164 @@ defmodule Ecto.Query.Planner do {{:value, type}, [expr | fields], from} end - defp collect_fields({:filter, _, [call, _]} = expr, fields, from, query, take, keep_literals?) do + defp collect_fields({:filter, _, [call, _]} = expr, fields, from, query, take, keep_literals?, _drop) do case call do {agg, _, _} when agg in @aggs -> :ok {:fragment, _, [_ | _]} -> :ok _ -> error!(query, "filter(...) 
expects the first argument to be an aggregate expression, got: `#{Macro.to_string(expr)}`") end - {type, _, _} = collect_fields(call, fields, from, query, take, keep_literals?) + {type, _, _} = collect_fields(call, fields, from, query, take, keep_literals?, %{}) {type, [expr | fields], from} end - defp collect_fields({:coalesce, _, [left, right]} = expr, fields, from, query, take, _keep_literals?) do - {left_type, _, _} = collect_fields(left, fields, from, query, take, true) - {right_type, _, _} = collect_fields(right, fields, from, query, take, true) + defp collect_fields({:coalesce, _, [left, right]} = expr, fields, from, query, take, _keep_literals?, _drop) do + {left_type, _, _} = collect_fields(left, fields, from, query, take, true, %{}) + {right_type, _, _} = collect_fields(right, fields, from, query, take, true, %{}) type = if left_type == right_type, do: left_type, else: {:value, :any} {type, [expr | fields], from} end - defp collect_fields({:over, _, [call, window]} = expr, fields, from, query, take, keep_literals?) do + defp collect_fields({:over, _, [call, window]} = expr, fields, from, query, take, keep_literals?, _drop) do if is_atom(window) and not Keyword.has_key?(query.windows, window) do error!(query, "unknown window #{inspect window} given to over/2") end - {type, _, _} = collect_fields(call, fields, from, query, take, keep_literals?) + {type, _, _} = collect_fields(call, fields, from, query, take, keep_literals?, %{}) {type, [expr | fields], from} end defp collect_fields({{:., dot_meta, [{:&, _, [_]}, _]}, _, []} = expr, - fields, from, _query, _take, _keep_literals?) do + fields, from, _query, _take, _keep_literals?, _drop) do {{:value, Keyword.fetch!(dot_meta, :type)}, [expr | fields], from} end - defp collect_fields({left, right}, fields, from, query, take, keep_literals?) do + defp collect_fields({left, right}, fields, from, query, take, keep_literals?, _drop) do {args, fields, from} = collect_args([left, right], fields, from, query, take, keep_literals?, []) {{:tuple, args}, fields, from} end - defp collect_fields({:{}, _, args}, fields, from, query, take, keep_literals?) do + defp collect_fields({:{}, _, args}, fields, from, query, take, keep_literals?, _drop) do {args, fields, from} = collect_args(args, fields, from, query, take, keep_literals?, []) {{:tuple, args}, fields, from} end - defp collect_fields({:%{}, _, [{:|, _, [data, args]}]}, fields, from, query, take, keep_literals?) do - {data, fields, from} = collect_fields(data, fields, from, query, take, keep_literals?) + defp collect_fields({:%{}, _, [{:|, _, [data, args]}]}, fields, from, query, take, keep_literals?, _drop) do + drop = Map.new(args, fn {key, _} -> {key, nil} end) + {data, fields, from} = collect_fields(data, fields, from, query, take, keep_literals?, drop) {args, fields, from} = collect_kv(args, fields, from, query, take, keep_literals?, []) {{:map, data, args}, fields, from} end - defp collect_fields({:%{}, _, args}, fields, from, query, take, keep_literals?) do + defp collect_fields({:%{}, _, args}, fields, from, query, take, keep_literals?, _drop) do {args, fields, from} = collect_kv(args, fields, from, query, take, keep_literals?, []) {{:map, args}, fields, from} end defp collect_fields({:%, _, [name, {:%{}, _, [{:|, _, [data, args]}]}]}, - fields, from, query, take, keep_literals?) do - {data, fields, from} = collect_fields(data, fields, from, query, take, keep_literals?) 
+ fields, from, query, take, keep_literals?, _drop) do + drop = Map.new(args, fn {key, _} -> {key, nil} end) + {data, fields, from} = collect_fields(data, fields, from, query, take, keep_literals?, drop) {args, fields, from} = collect_kv(args, fields, from, query, take, keep_literals?, []) struct!(name, args) {{:struct, name, data, args}, fields, from} end - defp collect_fields({:%, _, [name, {:%{}, _, args}]}, fields, from, query, take, keep_literals?) do + defp collect_fields({:%, _, [name, {:%{}, _, args}]}, fields, from, query, take, keep_literals?, _drop) do {args, fields, from} = collect_kv(args, fields, from, query, take, keep_literals?, []) struct!(name, args) {{:struct, name, args}, fields, from} end - defp collect_fields({:merge, _, args}, fields, from, query, take, keep_literals?) do + defp collect_fields({:merge, _, args}, fields, from, query, take, keep_literals?, _drop) do {[left, right], fields, from} = collect_args(args, fields, from, query, take, keep_literals?, []) {{:merge, left, right}, fields, from} end - defp collect_fields({:date_add, _, [arg | _]} = expr, fields, from, query, take, keep_literals?) do - case collect_fields(arg, fields, from, query, take, keep_literals?) do + defp collect_fields({:date_add, _, [arg | _]} = expr, fields, from, query, take, keep_literals?, _drop) do + case collect_fields(arg, fields, from, query, take, keep_literals?, %{}) do {{:value, :any}, _, _} -> {{:value, :date}, [expr | fields], from} {type, _, _} -> {type, [expr | fields], from} end end - defp collect_fields({:datetime_add, _, [arg | _]} = expr, fields, from, query, take, keep_literals?) do - case collect_fields(arg, fields, from, query, take, keep_literals?) do + defp collect_fields({:datetime_add, _, [arg | _]} = expr, fields, from, query, take, keep_literals?, _drop) do + case collect_fields(arg, fields, from, query, take, keep_literals?, %{}) do {{:value, :any}, _, _} -> {{:value, :naive_datetime}, [expr | fields], from} {type, _, _} -> {type, [expr | fields], from} end end - defp collect_fields(args, fields, from, query, take, keep_literals?) when is_list(args) do + defp collect_fields(args, fields, from, query, take, keep_literals?, _drop) when is_list(args) do {args, fields, from} = collect_args(args, fields, from, query, take, keep_literals?, []) {{:list, args}, fields, from} end - defp collect_fields(expr, fields, from, _query, _take, true) when is_binary(expr) do + defp collect_fields(expr, fields, from, _query, _take, true, _drop) when is_binary(expr) do {{:value, :binary}, [expr | fields], from} end - defp collect_fields(expr, fields, from, _query, _take, true) when is_integer(expr) do + defp collect_fields(expr, fields, from, _query, _take, true, _drop) when is_integer(expr) do {{:value, :integer}, [expr | fields], from} end - defp collect_fields(expr, fields, from, _query, _take, true) when is_float(expr) do + defp collect_fields(expr, fields, from, _query, _take, true, _drop) when is_float(expr) do {{:value, :float}, [expr | fields], from} end - defp collect_fields(expr, fields, from, _query, _take, true) when is_boolean(expr) do + defp collect_fields(expr, fields, from, _query, _take, true, _drop) when is_boolean(expr) do {{:value, :boolean}, [expr | fields], from} end - defp collect_fields(nil, fields, from, _query, _take, true) do + defp collect_fields(nil, fields, from, _query, _take, true, _drop) do {{:value, :any}, [nil | fields], from} end - defp collect_fields(expr, fields, from, _query, _take, _keep_literals?) 
when is_atom(expr) do + defp collect_fields(expr, fields, from, _query, _take, _keep_literals?, _drop) when is_atom(expr) do {expr, fields, from} end - defp collect_fields(expr, fields, from, _query, _take, false) + defp collect_fields(expr, fields, from, _query, _take, false, _drop) when is_binary(expr) or is_number(expr) do {expr, fields, from} end - defp collect_fields(%Ecto.Query.Tagged{tag: tag} = expr, fields, from, _query, _take, _keep_literals?) do + defp collect_fields(%Ecto.Query.Tagged{tag: tag} = expr, fields, from, _query, _take, _keep_literals?, _drop) do {{:value, tag}, [expr | fields], from} end - defp collect_fields({op, _, [_]} = expr, fields, from, _query, _take, _keep_literals?) + defp collect_fields({op, _, [_]} = expr, fields, from, _query, _take, _keep_literals?, _drop) when op in ~w(not is_nil)a do {{:value, :boolean}, [expr | fields], from} end - defp collect_fields({op, _, [_, _]} = expr, fields, from, _query, _take, _keep_literals?) + defp collect_fields({op, _, [_, _]} = expr, fields, from, _query, _take, _keep_literals?, _drop) when op in ~w(< > <= >= == != and or like ilike)a do {{:value, :boolean}, [expr | fields], from} end - defp collect_fields(expr, fields, from, _query, _take, _keep_literals?) do + defp collect_fields({:selected_as, _, [select_expr, _name]} = expr, fields, from, query, take, keep_literals?, _drop) do + {type, _, _} = collect_fields(select_expr, fields, from, query, take, keep_literals?, %{}) + {type, [expr | fields], from} + end + + defp collect_fields(expr, fields, from, _query, _take, _keep_literals?, _drop) do {{:value, :any}, [expr | fields], from} end defp collect_kv([{key, value} | elems], fields, from, query, take, keep_literals?, acc) do - {key, fields, from} = collect_fields(key, fields, from, query, take, keep_literals?) - {value, fields, from} = collect_fields(value, fields, from, query, take, keep_literals?) + {key, fields, from} = collect_fields(key, fields, from, query, take, keep_literals?, %{}) + {value, fields, from} = collect_fields(value, fields, from, query, take, keep_literals?, %{}) collect_kv(elems, fields, from, query, take, keep_literals?, [{key, value} | acc]) end + defp collect_kv([], fields, from, _query, _take, _keep_literals?, acc) do {Enum.reverse(acc), fields, from} end defp collect_args([elem | elems], fields, from, query, take, keep_literals?, acc) do - {elem, fields, from} = collect_fields(elem, fields, from, query, take, keep_literals?) 
+ {elem, fields, from} = collect_fields(elem, fields, from, query, take, keep_literals?, %{}) collect_args(elems, fields, from, query, take, keep_literals?, [elem | acc]) end defp collect_args([], fields, from, _query, _take, _keep_literals?, acc) do @@ -1527,7 +1604,7 @@ defmodule Ecto.Query.Planner do defp collect_assocs(exprs, fields, query, tag, take, [{assoc, {ix, children}}|tail]) do to_take = get_preload_source!(query, ix) {fetch, take_children} = fetch_assoc(tag, take, assoc) - {expr, taken} = take!(to_take, query, fetch, assoc, ix) + {expr, taken} = take!(to_take, query, fetch, assoc, ix, %{}) exprs = [expr | exprs] fields = Enum.reverse(taken, fields) {exprs, fields} = collect_assocs(exprs, fields, query, tag, take_children, children) @@ -1545,12 +1622,12 @@ defmodule Ecto.Query.Planner do end end - defp source_take!(kind, query, take, field, ix) do + defp source_take!(kind, query, take, field, ix, drop) do source = get_source!(kind, query, ix) - take!(source, query, Access.fetch(take, field), field, ix) + take!(source, query, Access.fetch(take, field), field, ix, drop) end - defp take!(source, query, fetched, field, ix) do + defp take!(source, query, fetched, field, ix, drop) do case {fetched, source} do {{:ok, {:struct, _}}, {:fragment, _, _}} -> error! query, "it is not possible to return a struct subset of a fragment" @@ -1567,7 +1644,7 @@ defmodule Ecto.Query.Planner do {{:ok, {kind, fields}}, {source, schema, prefix}} when is_binary(source) -> dumper = if schema, do: schema.__schema__(:dump), else: %{} schema = if kind == :map, do: nil, else: schema - {types, fields} = select_dump(List.wrap(fields), dumper, ix) + {types, fields} = select_dump(List.wrap(fields), dumper, ix, drop) {{:source, {source, schema}, prefix || query.prefix, types}, fields} {{:ok, {_, fields}}, _} -> @@ -1580,7 +1657,8 @@ defmodule Ecto.Query.Planner do {{:value, :map}, [{:&, [], [ix]}]} {:error, {source, schema, prefix}} -> - {types, fields} = select_dump(schema.__schema__(:query_fields), schema.__schema__(:dump), ix) + {types, fields} = select_dump(schema.__schema__(:query_fields), schema.__schema__(:dump), ix, drop) + {{:source, {source, schema}, prefix || query.prefix, types}, fields} {:error, %Ecto.SubQuery{select: select}} -> @@ -1589,11 +1667,11 @@ defmodule Ecto.Query.Planner do end end - defp select_dump(fields, dumper, ix) do + defp select_dump(fields, dumper, ix, drop) do fields |> Enum.reverse |> Enum.reduce({[], []}, fn - field, {types, exprs} when is_atom(field) -> + field, {types, exprs} when is_atom(field) and not is_map_key(drop, field) -> {source, type} = Map.get(dumper, field, {field, :any}) {[{field, type} | types], [select_field(source, ix) | exprs]} _field, acc -> @@ -1826,6 +1904,20 @@ defmodule Ecto.Query.Planner do field end + defp cte_fields([_key | _rest_keys], [{:selected_as, _, [_, _]} | _rest_fields], _acc) do + raise ArgumentError, + "`selected_as/2` can only be used in the outer most `select` expression. " <> + "If you are attempting to alias a field from a subquery or cte, it is not allowed " <> + "because the fields are automatically aliased by the corresponding map/struct key." 
+ end + + defp cte_fields([key | rest_keys], [field | rest_fields], acc) do + cte_fields(rest_keys, rest_fields, [{key, field} | acc]) + end + + defp cte_fields(_keys, [], acc), do: :lists.reverse(acc) + defp cte_fields([], _fields, acc), do: :lists.reverse(acc) + defp assert_update!(%Ecto.Query{updates: updates} = query, operation) do changes = Enum.reduce(updates, %{}, fn update, acc -> diff --git a/.deps/ecto/lib/ecto/repo.ex b/.deps/ecto/lib/ecto/repo.ex @@ -163,7 +163,8 @@ defmodule Ecto.Repo do * `:result` - the query result * `:params` - the query parameters * `:query` - the query sent to the database as a string - * `:source` - the source the query was made on (may be nil) + * `:source` - the source the query was made on (may be `nil`) + * `:stacktrace` - the stacktrace information, if enabled, or `nil` * `:options` - extra options given to the repo operation under `:telemetry_options` @@ -660,7 +661,7 @@ defmodule Ecto.Repo do """ @doc group: "Query API" @callback get(queryable :: Ecto.Queryable.t(), id :: term, opts :: Keyword.t()) :: - Ecto.Schema.t() | nil + Ecto.Schema.t() | term | nil @doc """ Similar to `c:get/3` but raises `Ecto.NoResultsError` if no record was found. @@ -686,7 +687,7 @@ defmodule Ecto.Repo do """ @doc group: "Query API" @callback get!(queryable :: Ecto.Queryable.t(), id :: term, opts :: Keyword.t()) :: - Ecto.Schema.t() + Ecto.Schema.t() | term @doc """ Fetches a single result from the query. @@ -717,7 +718,7 @@ defmodule Ecto.Repo do queryable :: Ecto.Queryable.t(), clauses :: Keyword.t() | map, opts :: Keyword.t() - ) :: Ecto.Schema.t() | nil + ) :: Ecto.Schema.t() | term | nil @doc """ Similar to `c:get_by/3` but raises `Ecto.NoResultsError` if no record was found. @@ -748,7 +749,7 @@ defmodule Ecto.Repo do queryable :: Ecto.Queryable.t(), clauses :: Keyword.t() | map, opts :: Keyword.t() - ) :: Ecto.Schema.t() + ) :: Ecto.Schema.t() | term @doc """ Reloads a given schema or schema list from the database. @@ -917,7 +918,7 @@ defmodule Ecto.Repo do """ @doc group: "Query API" @callback one(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) :: - Ecto.Schema.t() | nil + Ecto.Schema.t() | term | nil @doc """ Similar to `c:one/2` but raises `Ecto.NoResultsError` if no record was found. @@ -938,7 +939,7 @@ defmodule Ecto.Repo do """ @doc group: "Query API" @callback one!(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) :: - Ecto.Schema.t() + Ecto.Schema.t() | term @doc """ Preloads all associations on the given struct or structs. @@ -1077,7 +1078,7 @@ defmodule Ecto.Repo do MyRepo.all(query) """ @doc group: "Query API" - @callback all(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) :: [Ecto.Schema.t()] + @callback all(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) :: [Ecto.Schema.t() | term] @doc """ Returns a lazy enumerable that emits all entries from the data store @@ -1157,6 +1158,9 @@ defmodule Ecto.Repo do from(p in Post, where: p.id < 10, update: [set: [title: fragment("upper(?)", ^new_title)]]) |> MyRepo.update_all([]) + from(p in Post, where: p.id < 10, update: [set: [visits: p.visits * 1000]]) + |> MyRepo.update_all([]) + """ @doc group: "Query API" @callback update_all( @@ -1759,9 +1763,9 @@ defmodule Ecto.Repo do repo.insert!(%Post{}) end) - If an unhandled error occurs the transaction will be rolled back - and the error will bubble up from the transaction function. 
- If no error occurred the transaction will be committed when the + If an Elixir exception occurs the transaction will be rolled back + and the exception will bubble up from the transaction function. + If no exception occurs, the transaction is committed when the function returns. A transaction can be explicitly rolled back by calling `c:rollback/1`, this will immediately leave the function and return the value given to `rollback` as `{:error, value}`. @@ -1769,11 +1773,13 @@ A successful transaction returns the value returned by the function wrapped in a tuple as `{:ok, value}`. + ### Nested transactions + If `c:transaction/2` is called inside another transaction, the function is simply executed, without wrapping the new transaction call in any way. If there is an error in the inner transaction and the error is rescued, or the inner transaction is rolled back, the whole outer - transaction is marked as tainted, guaranteeing nothing will be committed. + transaction is aborted, guaranteeing nothing will be committed. Below is an example of how rollbacks work with nested transactions: @@ -1794,16 +1800,24 @@ # `rollback/1` stops execution, so code here won't be run end) - # When the inner transaction was rolled back, execution in this outer - # transaction is also stopped immediately. When this occurs, the - # outer transaction(s) return `{:error, :rollback}`. + # The transaction here is now aborted and any further + # operation will raise an exception. end) + See the "Aborted transactions" section for more examples of aborted + transactions and how to handle them. + + In practice, managing nested transactions can become complex quickly. + For this reason, Ecto provides `Ecto.Multi` for composing transactions. + ## Use with Ecto.Multi - Besides functions, transactions can be used with an `Ecto.Multi` struct. - A transaction will be started, all operations applied and in case of - success committed returning `{:ok, changes}`: + `c:transaction/2` also accepts the `Ecto.Multi` struct as its first argument. + `Ecto.Multi` allows you to compose transaction operations, step by step, + and manage what happens in case of success or failure. + + When an `Ecto.Multi` is given to this function, a transaction will be started, + all operations applied and in case of success committed returning `{:ok, changes}`: # With Ecto.Multi Ecto.Multi.new() @@ -1811,11 +1825,52 @@ |> MyRepo.transaction In case of any errors the transaction will be rolled back and - `{:error, failed_operation, failed_value, changes_so_far}` will be - returned. + `{:error, failed_operation, failed_value, changes_so_far}` will be returned. + + Explore the `Ecto.Multi` documentation to learn more and find detailed examples. + + ## Aborted transactions + + When an operation inside a transaction fails, the transaction is aborted in the database. + For instance, if you attempt an insert that violates a unique constraint, the insert fails + and the transaction is aborted. In such cases, any further operation inside the transaction + will raise exceptions. + + Take the following transaction as an example: + + Repo.transaction(fn repo -> + case repo.insert(changeset) do + {:ok, post} -> + repo.insert(%Status{value: "success"}) + + {:error, changeset} -> + repo.insert(%Status{value: "failure"}) + end + end) - You can read more about using transactions with `Ecto.Multi` as well as - see some examples in the `Ecto.Multi` documentation.
+ If the changeset is valid, but the insert operation fails due to a database constraint, + the subsequent `repo.insert(%Status{value: "failure"})` operation will raise an exception because the + database has already aborted the transaction, thus making the operation invalid. + In Postgres, the exception would look like this: + + ** (Postgrex.Error) ERROR 25P02 (in_failed_sql_transaction) current transaction is aborted, commands ignored until end of transaction block + + If the changeset is invalid before it reaches the database due to a validation error, + no statement is sent to the database, an `:error` tuple is returned, and the `repo.insert(%Status{value: "failure"})` + operation will execute as usual. + + We have two options to deal with such scenarios: + + If you don't want to change the semantics of your code, you can use the savepoints + feature by passing the `:mode` option like this: `repo.insert(changeset, mode: :savepoint)`. + In case of an exception, the transaction will roll back to the savepoint and prevent + the transaction from failing. + + Another alternative is to handle this operation outside of the transaction. + For example, you can choose to perform an explicit `repo.rollback` call in the + `{:error, changeset}` clause and then perform the `repo.insert(%Status{value: "failure"})` outside + of the transaction. You might also consider using `Ecto.Multi`, as it automatically + rolls back whenever an operation fails. ## Working with processes diff --git a/.deps/ecto/lib/ecto/repo/preloader.ex b/.deps/ecto/lib/ecto/repo/preloader.ex @@ -63,7 +63,8 @@ defmodule Ecto.Repo.Preloader do if sample = Enum.find(structs, & &1) do module = sample.__struct__ prefix = preload_prefix(tuplet, sample) - {assocs, throughs} = expand(module, preloads, {%{}, %{}}) + {assocs, throughs, embeds} = expand(module, preloads, {%{}, %{}, []}) + structs = preload_embeds(structs, embeds, repo_name, tuplet) {fetched_assocs, to_fetch_queries} = prepare_queries(structs, module, assocs, prefix, repo_name, tuplet) @@ -154,6 +155,40 @@ defmodule Ecto.Repo.Preloader do defp preload_assocs([], [], _repo_name, _tuplet), do: [] + defp preload_embeds(structs, [], _repo_name, _tuplet), do: structs + + defp preload_embeds(structs, [embed | embeds], repo_name, tuplet) do + + {%{field: field, cardinality: card}, sub_preloads} = embed + + {embed_structs, counts} = + Enum.flat_map_reduce(structs, [], fn + %{^field => embeds}, counts when is_list(embeds) -> {embeds, [length(embeds) | counts]} + %{^field => nil}, counts -> {[], [0 | counts]} + %{^field => embed}, counts -> {[embed], [1 | counts]} + nil, counts -> {[], [0 | counts]} + struct, _counts -> raise ArgumentError, "expected #{inspect(struct)} to contain embed `#{field}`" + end) + + embed_structs = preload_each(embed_structs, repo_name, sub_preloads, tuplet) + structs = load_embeds(card, field, structs, embed_structs, Enum.reverse(counts), []) + preload_embeds(structs, embeds, repo_name, tuplet) + end + + defp load_embeds(_card, _field, [], [], [], acc), do: Enum.reverse(acc) + + defp load_embeds(card, field, [struct | structs], embed_structs, [0 | counts], acc), + do: load_embeds(card, field, structs, embed_structs, counts, [struct | acc]) + + defp load_embeds(:one, field, [struct | structs], [embed_struct | embed_structs], [1 | counts], acc), + do: load_embeds(:one, field, structs, embed_structs, counts, [Map.put(struct, field, embed_struct) | acc]) + + defp load_embeds(:many, field, [struct | structs], embed_structs, [count | counts], acc) do + {current_embeds, rest_embeds} = split_n(embed_structs, 
count, []) + acc = [Map.put(struct, field, Enum.reverse(current_embeds)) | acc] + load_embeds(:many, field, structs, rest_embeds, counts, acc) + end + defp maybe_unpack_query(false, queries), do: {[], [], queries} defp maybe_unpack_query(true, [{ids, structs} | queries]), do: {ids, structs, queries} @@ -217,13 +252,20 @@ defmodule Ecto.Repo.Preloader do # If we are returning many results, we must sort by the key too query = - case card do - :many -> + case {card, query.combinations} do + {:many, [{kind, _} | []]} -> + raise ArgumentError, + "`#{kind}` queries must be wrapped inside of a subquery " <> + "when preloading a `has_many` or `many_to_many` association. " <> + "You must also ensure that all members of the `#{kind}` query " <> + "select the parent's foreign key" + + {:many, _} -> update_in query.order_bys, fn order_bys -> [%Ecto.Query.QueryExpr{expr: preload_order(assoc, query, field), params: [], file: __ENV__.file, line: __ENV__.line}|order_bys] end - :one -> + {:one, _} -> query end @@ -493,22 +535,36 @@ defmodule Ecto.Repo.Preloader do ## Expand def expand(schema, preloads, acc) do - Enum.reduce(preloads, acc, fn {preload, {fields, query, sub_preloads}}, {assocs, throughs} -> - assoc = association_from_schema!(schema, preload) - info = assoc.__struct__.preload_info(assoc) + Enum.reduce(preloads, acc, fn {preload, {fields, query, sub_preloads}}, + {assocs, throughs, embeds} -> + assoc_or_embed = association_or_embed!(schema, preload) + + info = assoc_or_embed.__struct__.preload_info(assoc_or_embed) case info do {:assoc, _, _} -> - value = {info, fields, query, sub_preloads} + value = {info, fields, query, sub_preloads} assocs = Map.update(assocs, preload, value, &merge_preloads(preload, value, &1)) - {assocs, throughs} + {assocs, throughs, embeds} + {:through, _, through} -> through = through |> Enum.reverse() |> Enum.reduce({fields, query, sub_preloads}, &{nil, nil, [{&1, &2}]}) |> elem(2) - expand(schema, through, {assocs, Map.put(throughs, preload, info)}) + + expand(schema, through, {assocs, Map.put(throughs, preload, info), embeds}) + + :embed -> + if sub_preloads == [] do + raise ArgumentError, + "cannot preload embedded field #{inspect(assoc_or_embed.field)} " <> + "without also preloading one of its associations as it has no effect" + end + + embeds = [{assoc_or_embed, sub_preloads} | embeds] + {assocs, throughs, embeds} end end) end @@ -522,12 +578,9 @@ defmodule Ecto.Repo.Preloader do "with different queries: #{inspect left} and #{inspect right}" end - # Since there is some ambiguity between assoc and queries. - # We reimplement this function here for nice error messages. 
- defp association_from_schema!(schema, assoc) do - schema.__schema__(:association, assoc) || - raise ArgumentError, - "schema #{inspect schema} does not have association #{inspect assoc}#{maybe_module(assoc)}" + defp association_or_embed!(schema, preload) do + schema.__schema__(:association, preload) || schema.__schema__(:embed, preload) || + raise ArgumentError, "schema #{inspect schema} does not have association or embed #{inspect preload}#{maybe_module(preload)}" end defp maybe_module(assoc) do diff --git a/.deps/ecto/lib/ecto/repo/queryable.ex b/.deps/ecto/lib/ecto/repo/queryable.ex @@ -29,12 +29,16 @@ defmodule Ecto.Repo.Queryable do {query, opts} = repo.prepare_query(:stream, query, opts) query = attach_prefix(query, opts) - {query_meta, prepared, params} = Planner.query(query, :all, cache, adapter, 0) + + {query_meta, prepared, cast_params, dump_params} = + Planner.query(query, :all, cache, adapter, 0) + + opts = Keyword.put(opts, :cast_params, cast_params) case query_meta do %{select: nil} -> adapter_meta - |> adapter.stream(query_meta, prepared, params, opts) + |> adapter.stream(query_meta, prepared, dump_params, opts) |> Stream.flat_map(fn {_, nil} -> [] end) %{select: select, preloads: preloads} -> @@ -51,7 +55,7 @@ defmodule Ecto.Repo.Queryable do end preprocessor = preprocessor(from, preprocess, adapter) - stream = adapter.stream(adapter_meta, query_meta, prepared, params, opts) + stream = adapter.stream(adapter_meta, query_meta, prepared, dump_params, opts) postprocessor = postprocessor(from, postprocess, take, adapter) stream @@ -202,11 +206,15 @@ defmodule Ecto.Repo.Queryable do {query, opts} = repo.prepare_query(operation, query, opts) query = attach_prefix(query, opts) - {query_meta, prepared, params} = Planner.query(query, operation, cache, adapter, 0) + + {query_meta, prepared, cast_params, dump_params} = + Planner.query(query, operation, cache, adapter, 0) + + opts = Keyword.put(opts, :cast_params, cast_params) case query_meta do %{select: nil} -> - adapter.execute(adapter_meta, query_meta, prepared, params, opts) + adapter.execute(adapter_meta, query_meta, prepared, dump_params, opts) %{select: select, sources: sources, preloads: preloads} -> %{ @@ -218,7 +226,7 @@ defmodule Ecto.Repo.Queryable do } = select preprocessor = preprocessor(from, preprocess, adapter) - {count, rows} = adapter.execute(adapter_meta, query_meta, prepared, params, opts) + {count, rows} = adapter.execute(adapter_meta, query_meta, prepared, dump_params, opts) postprocessor = postprocessor(from, postprocess, take, adapter) {count, diff --git a/.deps/ecto/lib/ecto/repo/schema.ex b/.deps/ecto/lib/ecto/repo/schema.ex @@ -44,7 +44,7 @@ defmodule Ecto.Repo.Schema do |> returning(opts) |> fields_to_sources(dumper) - {rows_or_query, header, placeholder_values, counter} = + {rows_or_query, header, row_cast_params, placeholder_cast_params, placeholder_dump_params, counter} = extract_header_and_fields(repo, rows_or_query, schema, dumper, autogen_id, placeholder_map, adapter, opts) schema_meta = metadata(schema, prefix, source, autogen_id, nil, opts) @@ -52,10 +52,11 @@ defmodule Ecto.Repo.Schema do on_conflict = Keyword.get(opts, :on_conflict, :raise) conflict_target = Keyword.get(opts, :conflict_target, []) conflict_target = conflict_target(conflict_target, dumper) - on_conflict = on_conflict(on_conflict, conflict_target, schema_meta, counter, adapter) + {on_conflict, conflict_cast_params} = on_conflict(on_conflict, conflict_target, schema_meta, counter, adapter) + opts = Keyword.put(opts, :cast_params, 
placeholder_cast_params ++ row_cast_params ++ conflict_cast_params) {count, rows_or_query} = - adapter.insert_all(adapter_meta, schema_meta, header, rows_or_query, on_conflict, return_sources, placeholder_values, opts) + adapter.insert_all(adapter_meta, schema_meta, header, rows_or_query, on_conflict, return_sources, placeholder_dump_params, opts) {count, postprocess(rows_or_query, return_fields_or_types, adapter, schema, schema_meta)} end @@ -81,44 +82,33 @@ defmodule Ecto.Repo.Schema do when is_list(rows) do mapper = init_mapper(schema, dumper, adapter, placeholder_map) - {rows, {header, has_query?, placeholder_dump, _}} = - Enum.map_reduce(rows, {%{}, false, %{}, 1}, fn fields, acc -> - {fields, {header, has_query?, placeholder_dump, counter}} = Enum.map_reduce(fields, acc, mapper) + {rows, {header, placeholder_dump, _}} = + Enum.map_reduce(rows, {%{}, %{}, 1}, fn fields, acc -> + {fields, {header, placeholder_dump, counter}} = Enum.map_reduce(fields, acc, mapper) {fields, header} = autogenerate_id(autogen_id, fields, header, adapter) - {fields, {header, has_query?, placeholder_dump, counter}} + {fields, {header, placeholder_dump, counter}} end) header = Map.keys(header) placeholder_size = map_size(placeholder_dump) - counter = fn -> - Enum.reduce( - rows, - placeholder_size, - &(Enum.count(&1, fn {_, val} -> not match?({:placeholder, _}, val) end) + &2) - ) - end - - placeholder_vals_list = + {placeholder_cast_params, placeholder_dump_params} = placeholder_dump - |> Enum.map(fn {_, {idx, _, value}} -> {idx, value} end) + |> Enum.map(fn {_, {idx, _, cast_value, dump_value}} -> {idx, cast_value, dump_value} end) |> Enum.sort - |> Enum.map(&elem(&1, 1)) + |> Enum.map(&{elem(&1, 1), elem(&1, 2)}) + |> Enum.unzip - if has_query? do - rows = plan_query_in_rows(rows, header, adapter) - {rows, header, placeholder_vals_list, counter} - else - {rows, header, placeholder_vals_list, counter} - end + {rows, row_cast_params, counter} = plan_query_in_rows(rows, header, adapter, placeholder_size) + {rows, header, row_cast_params, placeholder_cast_params, placeholder_dump_params, fn -> counter end} end defp extract_header_and_fields(repo, %Ecto.Query{} = query, _schema, _dumper, _autogen_id, _placeholder_map, adapter, opts) do {query, opts} = repo.prepare_query(:insert_all, query, opts) query = attach_prefix(query, opts) - {query, params} = Ecto.Adapter.Queryable.plan_query(:insert_all, adapter, query) + {query, cast_params, dump_params} = Ecto.Adapter.Queryable.plan_query(:insert_all, adapter, query) header = case query.select do %Ecto.Query.SelectExpr{expr: {:%{}, _ctx, args}} -> @@ -144,9 +134,9 @@ defmodule Ecto.Repo.Schema do """ end - counter = fn -> length(params) end + counter = fn -> length(dump_params) end - {{query, params}, header, [], counter} + {{query, dump_params}, header, cast_params, [], [], counter} end defp extract_header_and_fields(_repo, rows_or_query, _schema, _dumper, _autogen_id, _placeholder_map, _adapter, _opts) do @@ -176,28 +166,28 @@ defmodule Ecto.Repo.Schema do end defp extract_value(source, value, type, placeholder_map, acc, dumper) do - {header, has_query?, placeholder_dump, counter} = acc + {header, placeholder_dump, counter} = acc case value do %Ecto.Query{} = query -> - {{source, query}, {Map.put(header, source, true), true, placeholder_dump, counter}} + {{source, query}, {Map.put(header, source, true), placeholder_dump, counter}} {:placeholder, key} -> {value, placeholder_dump, counter} = extract_placeholder(key, type, placeholder_map, placeholder_dump, counter, 
dumper) {{source, value}, - {Map.put(header, source, true), has_query?, placeholder_dump, counter}} + {Map.put(header, source, true), placeholder_dump, counter}} - value -> - {{source, dumper.(value)}, - {Map.put(header, source, true), has_query?, placeholder_dump, counter}} + cast_value -> + {{source, cast_value, dumper.(value)}, + {Map.put(header, source, true), placeholder_dump, counter}} end end defp extract_placeholder(key, type, placeholder_map, placeholder_dump, counter, dumper) do case placeholder_dump do - %{^key => {idx, ^type, _}} -> + %{^key => {idx, ^type, _, _}} -> {{:placeholder, idx}, placeholder_dump, counter} %{^key => {_, type, _}} -> @@ -206,42 +196,47 @@ defmodule Ecto.Repo.Schema do "The key #{inspect(key)} has already been dumped as a #{inspect(type)}" %{} -> - dumped_value = + {cast_value, dump_value} = case placeholder_map do - %{^key => val} -> - dumper.(val) + %{^key => cast_value} -> + {cast_value, dumper.(cast_value)} _ -> raise KeyError, "placeholder key #{inspect(key)} not found in #{inspect(placeholder_map)}" end - placeholder_dump = Map.put(placeholder_dump, key, {counter, type, dumped_value}) + placeholder_dump = Map.put(placeholder_dump, key, {counter, type, cast_value, dump_value}) {{:placeholder, counter}, placeholder_dump, counter + 1} end end - defp plan_query_in_rows(rows, header, adapter) do - {rows, _counter} = - Enum.map_reduce(rows, 0, fn fields, counter -> - Enum.flat_map_reduce(header, counter, fn key, counter -> + defp plan_query_in_rows(rows, header, adapter, counter) do + {rows, {cast_params, counter}} = + Enum.map_reduce(rows, {[], counter}, fn fields, {cast_param_acc, counter} -> + Enum.flat_map_reduce(header, {cast_param_acc, counter}, fn key, {cast_param_acc, counter} -> case :lists.keyfind(key, 1, fields) do {^key, %Ecto.Query{} = query} -> {query, params, _} = Ecto.Query.Planner.plan(query, :all, adapter) + {cast_params, dump_params} = Enum.unzip(params) {query, _} = Ecto.Query.Planner.normalize(query, :all, adapter, counter) + num_params = length(dump_params) - {[{key, {query, params}}], counter + length(params)} + {[{key, {query, dump_params}}], {Enum.reverse(cast_params, cast_param_acc), counter + num_params}} - {^key, value} -> - {[{key, value}], counter + 1} + {^key, {:placeholder, _} = value} -> + {[{key, value}], {cast_param_acc, counter}} + + {^key, cast_value, dump_value} -> + {[{key, dump_value}], {[cast_value | cast_param_acc], counter + 1}} false -> - {[], counter} + {[], {cast_param_acc, counter}} end end) end) - rows + {rows, Enum.reverse(cast_params), counter} end defp autogenerate_id(nil, fields, header, _adapter) do @@ -250,12 +245,13 @@ defmodule Ecto.Repo.Schema do defp autogenerate_id({key, source, type}, fields, header, adapter) do case :lists.keyfind(key, 1, fields) do - {^key, _} -> + {^key, _, _} -> {fields, header} false -> - if value = Ecto.Type.adapter_autogenerate(adapter, type) do - {[{source, value} | fields], Map.put(header, source, true)} + if dump_value = Ecto.Type.adapter_autogenerate(adapter, type) do + {:ok, cast_value} = Ecto.Type.adapter_load(adapter, type, dump_value) + {[{source, cast_value, dump_value} | fields], Map.put(header, source, true)} else {fields, header} end @@ -353,20 +349,26 @@ defmodule Ecto.Repo.Schema do schema_meta = metadata(struct, autogen_id, opts) changes = Map.merge(changeset.changes, embeds) - {changes, extra, return_types, return_sources} = + {changes, cast_extra, dump_extra, return_types, return_sources} = autogenerate_id(autogen_id, changes, return_types, 
return_sources, adapter) - {changes, autogen} = - dump_changes!(:insert, Map.take(changes, fields), schema, extra, dumper, adapter) + changes = Map.take(changes, fields) + autogen = autogenerate_changes(schema, :insert, changes) + + dump_changes = + dump_changes!(:insert, changes, autogen, schema, dump_extra, dumper, adapter) - on_conflict = - on_conflict(on_conflict, conflict_target, schema_meta, fn -> length(changes) end, adapter) + {on_conflict, conflict_cast_params} = + on_conflict(on_conflict, conflict_target, schema_meta, fn -> length(dump_changes) end, adapter) - args = [adapter_meta, schema_meta, changes, on_conflict, return_sources, opts] + change_values = Enum.map(changes, &elem(&1, 1)) + autogen_values = Enum.map(autogen, &elem(&1, 1)) + opts = Keyword.put(opts, :cast_params, change_values ++ autogen_values ++ cast_extra ++ conflict_cast_params) + args = [adapter_meta, schema_meta, dump_changes, on_conflict, return_sources, opts] case apply(user_changeset, adapter, :insert, args) do {:ok, values} -> - values = extra ++ values + values = dump_extra ++ values changeset |> load_changes(:loaded, return_types, values, embeds, autogen, adapter, schema_meta) @@ -435,16 +437,22 @@ defmodule Ecto.Repo.Schema do if changeset.valid? do embeds = Ecto.Embedded.prepare(changeset, embeds, adapter, :update) - original = changeset.changes |> Map.merge(embeds) |> Map.take(fields) - {changes, autogen} = dump_changes!(:update, original, schema, [], dumper, adapter) + changes = changeset.changes |> Map.merge(embeds) |> Map.take(fields) + autogen = autogenerate_changes(schema, :update, changes) + dump_changes = dump_changes!(:update, changes, autogen, schema, [], dumper, adapter) schema_meta = metadata(struct, schema.__schema__(:autogenerate_id), opts) - filters = dump_fields!(:update, schema, filters, dumper, adapter) - args = [adapter_meta, schema_meta, changes, filters, return_sources, opts] + dump_filters = dump_fields!(:update, schema, filters, dumper, adapter) + + change_values = Enum.map(changes, &elem(&1, 1)) + autogen_values = Enum.map(autogen, &elem(&1, 1)) + filter_values = Enum.map(filters, &elem(&1, 1)) + opts = Keyword.put(opts, :cast_params, change_values ++ autogen_values ++ filter_values) + args = [adapter_meta, schema_meta, dump_changes, dump_filters, return_sources, opts] # If there are no changes or all the changes were autogenerated but not forced, we skip {action, autogen} = - if original != %{} or (autogen != [] and force?), + if changes != %{} or (autogen != [] and force?), do: {:update, autogen}, else: {:noop, []} @@ -529,7 +537,7 @@ defmodule Ecto.Repo.Schema do if changeset.valid? 
do filters = add_pk_filter!(changeset.filters, struct) - filters = dump_fields!(:delete, schema, filters, dumper, adapter) + dump_filters = dump_fields!(:delete, schema, filters, dumper, adapter) # Delete related associations for %{__struct__: mod, on_delete: on_delete} = reflection <- assocs do @@ -537,7 +545,9 @@ defmodule Ecto.Repo.Schema do end schema_meta = metadata(struct, schema.__schema__(:autogenerate_id), opts) - args = [adapter_meta, schema_meta, filters, opts] + filter_values = Enum.map(filters, &elem(&1, 1)) + opts = Keyword.put(opts, :cast_params, filter_values) + args = [adapter_meta, schema_meta, dump_filters, opts] case apply(changeset, adapter, :delete, args) do {:ok, values} -> @@ -618,7 +628,7 @@ defmodule Ecto.Repo.Schema do end end defp put_repo_and_action(%{action: given}, action, repo, _tuplet) when given != nil and given != action, - do: raise ArgumentError, "a changeset with action #{inspect given} was given to #{inspect repo}.#{action}/2" + do: raise(ArgumentError, "a changeset with action #{inspect given} was given to #{inspect repo}.#{action}/2") defp put_repo_and_action(changeset, action, repo, {_adapter_meta, opts}), do: %{changeset | action: action, repo: repo, repo_opts: opts} @@ -673,23 +683,23 @@ defmodule Ecto.Repo.Schema do case on_conflict do :raise when conflict_target == [] -> - {:raise, [], []} + {{:raise, [], []}, []} :raise -> raise ArgumentError, ":conflict_target option is forbidden when :on_conflict is :raise" :nothing -> - {:nothing, [], conflict_target} + {{:nothing, [], conflict_target}, []} {:replace, keys} when is_list(keys) -> fields = Enum.map(keys, &field_source!(schema, &1)) - {fields, [], conflict_target} + {{fields, [], conflict_target}, []} :replace_all -> - {replace_all_fields!(:replace_all, schema, []), [], conflict_target} + {{replace_all_fields!(:replace_all, schema, []), [], conflict_target}, []} {:replace_all_except, fields} -> - {replace_all_fields!(:replace_all_except, schema, fields), [], conflict_target} + {{replace_all_fields!(:replace_all_except, schema, fields), [], conflict_target}, []} [_ | _] = on_conflict -> from = if schema, do: {source, schema}, else: source @@ -725,6 +735,8 @@ defmodule Ecto.Repo.Schema do {query, params, _} = Ecto.Query.Planner.plan(%{query | prefix: prefix}, :update_all, adapter) + {cast_params, dump_params} = Enum.unzip(params) + unless query.from.source == from do raise ArgumentError, "cannot run on_conflict: query because the query " <> "has a different {source, schema} pair than the " <> @@ -733,7 +745,7 @@ defmodule Ecto.Repo.Schema do end {query, _} = Ecto.Query.Planner.normalize(query, :update_all, adapter, counter_fun.()) - {query, params, conflict_target} + {{query, dump_params, conflict_target}, cast_params} end defp apply(_user_changeset, _adapter, :noop, _args) do @@ -920,27 +932,25 @@ defmodule Ecto.Repo.Schema do end defp autogenerate_id(nil, changes, return_types, return_sources, _adapter) do - {changes, [], return_types, return_sources} + {changes, [], [], return_types, return_sources} end defp autogenerate_id({key, source, type}, changes, return_types, return_sources, adapter) do cond do Map.has_key?(changes, key) -> # Set by user - {changes, [], return_types, return_sources} - value = Ecto.Type.adapter_autogenerate(adapter, type) -> # Autogenerated now - {changes, [{source, value}], [{key, type} | return_types], return_sources} + {changes, [], [], return_types, return_sources} + dump_value = Ecto.Type.adapter_autogenerate(adapter, type) -> # Autogenerated now + {:ok, 
cast_value} = Ecto.Type.adapter_load(adapter, type, dump_value) + {changes, [cast_value], [{source, dump_value}] , [{key, type} | return_types], return_sources} true -> # Autogenerated in storage - {changes, [], [{key, type} | return_types], [source | List.delete(return_sources, source)]} + {changes, [], [], [{key, type} | return_types], [source | List.delete(return_sources, source)]} end end - defp dump_changes!(action, changes, schema, extra, dumper, adapter) do - autogen = autogenerate_changes(schema, action, changes) - dumped = - dump_fields!(action, schema, changes, dumper, adapter) ++ - dump_fields!(action, schema, autogen, dumper, adapter) ++ - extra - {dumped, autogen} + defp dump_changes!(action, changes, autogen, schema, extra, dumper, adapter) do + dump_fields!(action, schema, changes, dumper, adapter) ++ + dump_fields!(action, schema, autogen, dumper, adapter) ++ + extra end defp autogenerate_changes(schema, action, changes) do diff --git a/.deps/ecto/lib/ecto/repo/supervisor.ex b/.deps/ecto/lib/ecto/repo/supervisor.ex @@ -3,7 +3,7 @@ defmodule Ecto.Repo.Supervisor do use Supervisor @defaults [timeout: 15000, pool_size: 10] - @integer_url_query_params ["timeout", "pool_size"] + @integer_url_query_params ["timeout", "pool_size", "idle_interval"] @doc """ Starts the repo supervisor. diff --git a/.deps/ecto/lib/ecto/schema.ex b/.deps/ecto/lib/ecto/schema.ex @@ -2042,20 +2042,30 @@ defmodule Ecto.Schema do @valid_embeds_one_options [:strategy, :on_replace, :source] @doc false - def __embeds_one__(mod, name, schema, opts) do + def __embeds_one__(mod, name, schema, opts) when is_atom(schema) do check_options!(opts, @valid_embeds_one_options, "embeds_one/3") embed(mod, :one, name, schema, opts) end + def __embeds_one__(_mod, _name, schema, _opts) do + raise ArgumentError, + "`embeds_one/3` expects `schema` to be a module name, but received #{inspect(schema)}" + end + @valid_embeds_many_options [:strategy, :on_replace, :source] @doc false - def __embeds_many__(mod, name, schema, opts) do + def __embeds_many__(mod, name, schema, opts) when is_atom(schema) do check_options!(opts, @valid_embeds_many_options, "embeds_many/3") opts = Keyword.put(opts, :default, []) embed(mod, :many, name, schema, opts) end + def __embeds_many__(_mod, _name, schema, _opts) do + raise ArgumentError, + "`embeds_many/3` expects `schema` to be a module name, but received #{inspect(schema)}" + end + @doc false def __embeds_module__(env, name, opts, block) do {pk, opts} = Keyword.pop(opts, :primary_key, {:id, :binary_id, autogenerate: true}) diff --git a/.deps/ecto/mix.exs b/.deps/ecto/mix.exs @@ -2,7 +2,7 @@ defmodule Ecto.MixProject do use Mix.Project @source_url "https://github.com/elixir-ecto/ecto" - @version "3.8.4" + @version "3.9.1" def project do [ diff --git a/.deps/ecto_sql/.hex b/.deps/ecto_sql/.hex Binary files differ. 
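For reference, the vendored trees above now correspond to ecto v3.9.1 and ecto_sql v3.9.0. A downstream project consuming these releases from Hex rather than vendoring them might pin them as in the following minimal sketch; `MyApp.MixProject` and `:my_app` are illustrative names, not part of this repository:

    defmodule MyApp.MixProject do
      use Mix.Project

      def project do
        [app: :my_app, version: "0.1.0", deps: deps()]
      end

      defp deps do
        [
          # ecto_sql v3.9.0 itself declares `~> 3.9.0` on ecto,
          # per the hex_metadata.config change below.
          {:ecto, "~> 3.9.1"},
          {:ecto_sql, "~> 3.9.0"}
        ]
      end
    end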
diff --git a/.deps/ecto_sql/CHANGELOG.md b/.deps/ecto_sql/CHANGELOG.md @@ -1,5 +1,17 @@ # Changelog for v3.x +## v3.9.0 (2022-09-27) + +### Enhancements + + * [migrations] Support `primary_key` configuration options in `table` + * [migrations] Add `:nulls_distinct` option for unique indexes + * [postgres] Support the use of advisory locks for migrations + * [sql] Add `dump_cmd` to `postgrex` and `myxql` adapters + * [sql] Log human-readable UUIDs by using pre-dumped query parameters + * [sql] Support select aliases from `selected_as/1` and `selected_as/2` + * [telemetry] Emit `schema_migration: true` under `telemetry_options` + ## v3.8.3 (2022-06-04) ### Enhancements @@ -11,7 +23,7 @@ ### Bug fixes * [postgres] Fix possible breaking change on `json_extract_path` for boolean values introduced in v3.8.0 - * [sql] Colorize stacktrace and use `:` before printing line number + * [sql] Colorize stacktrace and use `:` before printing line number ## v3.8.1 (2022-04-29) diff --git a/.deps/ecto_sql/hex_metadata.config b/.deps/ecto_sql/hex_metadata.config @@ -12,9 +12,8 @@ <<"lib/ecto/adapters/tds/connection.ex">>,<<"lib/ecto/adapters/sql.ex">>, <<"lib/ecto/adapters/postgres">>, <<"lib/ecto/adapters/postgres/connection.ex">>, - <<"lib/ecto/adapters/mysql.ex">>,<<"lib/ecto/adapters/myxql.ex">>, - <<"lib/ecto/adapters/postgres.ex">>,<<"lib/ecto/adapters/sql">>, - <<"lib/ecto/adapters/sql/stream.ex">>, + <<"lib/ecto/adapters/myxql.ex">>,<<"lib/ecto/adapters/postgres.ex">>, + <<"lib/ecto/adapters/sql">>,<<"lib/ecto/adapters/sql/stream.ex">>, <<"lib/ecto/adapters/sql/sandbox.ex">>, <<"lib/ecto/adapters/sql/connection.ex">>, <<"lib/ecto/adapters/sql/application.ex">>,<<"lib/ecto/migration.ex">>, @@ -46,7 +45,7 @@ {<<"name">>,<<"ecto">>}, {<<"optional">>,false}, {<<"repository">>,<<"hexpm">>}, - {<<"requirement">>,<<"~> 3.8.4">>}], + {<<"requirement">>,<<"~> 3.9.0">>}], [{<<"app">>,<<"telemetry">>}, {<<"name">>,<<"telemetry">>}, {<<"optional">>,false}, @@ -61,7 +60,7 @@ {<<"name">>,<<"postgrex">>}, {<<"optional">>,true}, {<<"repository">>,<<"hexpm">>}, - {<<"requirement">>,<<"~> 0.15.0 or ~> 0.16.0 or ~> 1.0">>}], + {<<"requirement">>,<<"~> 0.16.0 or ~> 1.0">>}], [{<<"app">>,<<"myxql">>}, {<<"name">>,<<"myxql">>}, {<<"optional">>,true}, @@ -72,4 +71,4 @@ {<<"optional">>,true}, {<<"repository">>,<<"hexpm">>}, {<<"requirement">>,<<"~> 2.1.1 or ~> 2.2">>}]]}. -{<<"version">>,<<"3.8.3">>}. +{<<"version">>,<<"3.9.0">>}. 
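A minimal sketch of the select aliases listed in the changelog above, assuming a hypothetical `Post` schema with a `:posted` field: `selected_as/2` names the selected expression, and `selected_as/1` reuses that alias instead of repeating the expression.

    import Ecto.Query

    # The alias :posted_date is defined in the select and then
    # referenced again in order_by.
    query =
      from p in Post,
        select: %{posted_date: selected_as(p.posted, :posted_date)},
        order_by: selected_as(:posted_date)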
diff --git a/.deps/ecto_sql/integration_test/sql/logging.exs b/.deps/ecto_sql/integration_test/sql/logging.exs
@@ -3,9 +3,10 @@ defmodule Ecto.Integration.LoggingTest do
   alias Ecto.Integration.TestRepo
   alias Ecto.Integration.PoolRepo
-  alias Ecto.Integration.Post
+  alias Ecto.Integration.{Post, Logging, ArrayLogging}
 
   import ExUnit.CaptureLog
+  import Ecto.Query, only: [from: 2]
 
   describe "telemetry" do
     test "dispatches event" do
@@ -75,8 +76,7 @@ defmodule Ecto.Integration.LoggingTest do
   @stacktrace_opts [stacktrace: true, log: :error]
 
   defp stacktrace_entry(line) do
-    "↳ anonymous fn/0 in Ecto.Integration.LoggingTest.\"test logs includes stacktraces\"/1, " <>
-      "at: integration_test/sql/logging.exs:#{line - 3}"
+    ~r/↳ anonymous fn\/0 in Ecto.Integration.LoggingTest.\"test logs includes stacktraces\"\/1, at: .*integration_test\/sql\/logging.exs:#{line - 3}/
   end
 
   test "when some measurements are nil" do
@@ -132,4 +132,556 @@ defmodule Ecto.Integration.LoggingTest do
       end) == ""
     end
   end
+
+  describe "parameter logging" do
+    @describetag :parameter_logging
+
+    @uuid_regex ~r/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/i
+    @naive_datetime_regex ~r/~N\[[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}\]/
+
+    test "for insert_all with query" do
+      # Source query
+      int = 1
+      uuid = Ecto.UUID.generate()
+
+      source_query =
+        from l in Logging,
+          where: l.int == ^int and l.uuid == ^uuid,
+          select: %{uuid: l.uuid, int: l.int}
+
+      # Ensure parameters are complete and in correct order
+      log =
+        capture_log(fn ->
+          TestRepo.insert_all(Logging, source_query, log: :info)
+        end)
+
+      param_regex = ~r/\[(?<int>.+), \"(?<uuid>.+)\"\]/
+      param_logs = Regex.named_captures(param_regex, log)
+
+      # Query parameters
+      assert param_logs["int"] == Integer.to_string(int)
+      assert param_logs["uuid"] == uuid
+    end
+
+    @tag :insert_select
+    test "for insert_all with entries" do
+      # Row 1
+      int = 1
+      uuid = Ecto.UUID.generate()
+      uuid_query = from l in Logging, where: l.int == ^int and l.uuid == ^uuid, select: l.uuid
+      now = NaiveDateTime.utc_now() |> NaiveDateTime.truncate(:second)
+
+      row1 = [
+        int: int,
+        uuid: uuid_query,
+        inserted_at: now,
+        updated_at: now
+      ]
+
+      # Row 2
+      int2 = 2
+      uuid2 = Ecto.UUID.generate()
+      int_query = from l in Logging, where: l.int == ^int2 and l.uuid == ^uuid2, select: l.int
+      now2 = NaiveDateTime.utc_now() |> NaiveDateTime.truncate(:second)
+
+      row2 = [
+        int: int_query,
+        uuid: uuid2,
+        inserted_at: now2,
+        updated_at: now2
+      ]
+
+      # Ensure parameters are complete and in correct order
+      log =
+        capture_log(fn ->
+          TestRepo.insert_all(Logging, [row1, row2], log: :info)
+        end)
+
+      param_regex =
+        ~r/\[\"(?<bid1>.+)\", (?<inserted_at1>.+), (?<int1>.+), (?<updated_at1>.+), (?<uuid1_query_int>.+), \"(?<uuid1_query_uuid>.+)\", \"(?<bid2>.+)\", (?<inserted_at2>.+), (?<int2_query_int>.+), \"(?<int2_query_uuid>.+)\", (?<updated_at2>.+), \"(?<uuid2>.+)\"\]/
+
+      param_logs = Regex.named_captures(param_regex, log)
+
+      # Autogenerated fields
+      assert param_logs["bid1"] =~ @uuid_regex
+      assert param_logs["bid2"] =~ @uuid_regex
+      # Row value parameters
+      assert param_logs["int1"] == Integer.to_string(int)
+      assert param_logs["inserted_at1"] == "~N[#{now}]"
+      assert param_logs["updated_at1"] == "~N[#{now}]"
+      assert param_logs["uuid1_query_int"] == Integer.to_string(int)
+      assert param_logs["uuid1_query_uuid"] == uuid
+      assert param_logs["int2_query_int"] == Integer.to_string(int2)
+      assert param_logs["int2_query_uuid"] == uuid2
+      assert param_logs["inserted_at2"] == "~N[#{now2}]"
+ assert param_logs["updated_at2"] == "~N[#{now2}]" + assert param_logs["uuid2"] == uuid2 + end + + @tag :insert_select + @tag :placeholders + test "for insert_all with entries and placeholders" do + # Placeholders + now = NaiveDateTime.utc_now() |> NaiveDateTime.truncate(:second) + now2 = NaiveDateTime.utc_now() |> NaiveDateTime.truncate(:second) + placeholder_map = %{now: now, now2: now2} + + # Row 1 + int = 1 + uuid = Ecto.UUID.generate() + uuid_query = from l in Logging, where: l.int == ^int and l.uuid == ^uuid, select: l.uuid + + row1 = [ + int: int, + uuid: uuid_query, + inserted_at: {:placeholder, :now}, + updated_at: {:placeholder, :now} + ] + + # Row 2 + int2 = 2 + uuid2 = Ecto.UUID.generate() + int_query = from l in Logging, where: l.int == ^int2 and l.uuid == ^uuid2, select: l.int + + row2 = [ + int: int_query, + uuid: uuid2, + inserted_at: {:placeholder, :now2}, + updated_at: {:placeholder, :now2} + ] + + # Ensure parameters are complete and in correct order + log = + capture_log(fn -> + TestRepo.insert_all(Logging, [row1, row2], placeholders: placeholder_map, log: :info) + end) + + param_regex = + ~r/\[(?<now_placeholder>.+), (?<now2_placeholder>.+), \"(?<bid1>.+)\", (?<int1>.+), (?<uuid1_query_int>.+), \"(?<uuid1_query_uuid>.+)\", \"(?<bid2>.+)\", (?<int2_query_int>.+), \"(?<int2_query_uuid>.+)\", \"(?<uuid2>.+)\"\]/ + + param_logs = Regex.named_captures(param_regex, log) + + # Placeholders + assert param_logs["now_placeholder"] == "~N[#{now}]" + assert param_logs["now2_placeholder"] == "~N[#{now2}]" + # Autogenerated fields + assert param_logs["bid1"] =~ @uuid_regex + assert param_logs["bid2"] =~ @uuid_regex + # Row value parameters + assert param_logs["int1"] == Integer.to_string(int) + assert param_logs["uuid1_query_int"] == Integer.to_string(int) + assert param_logs["uuid1_query_uuid"] == uuid + assert param_logs["int2_query_int"] == Integer.to_string(int2) + assert param_logs["int2_query_uuid"] == uuid2 + assert param_logs["uuid2"] == uuid2 + end + + @tag :with_conflict_target + test "for insert_all with query with conflict query" do + # Source query + int = 1 + uuid = Ecto.UUID.generate() + + source_query = + from l in Logging, + where: l.int == ^int and l.uuid == ^uuid, + select: %{uuid: l.uuid, int: l.int} + + # Conflict query + conflict_int = 0 + conflict_uuid = Ecto.UUID.generate() + conflict_update = 2 + + conflict_query = + from l in Logging, + where: l.int == ^conflict_int and l.uuid == ^conflict_uuid, + update: [set: [int: ^conflict_update]] + + # Ensure parameters are complete and in correct order + log = + capture_log(fn -> + TestRepo.insert_all(Logging, source_query, + on_conflict: conflict_query, + conflict_target: :bid, + log: :info + ) + end) + + param_regex = + ~r/\[(?<int>.+), \"(?<uuid>.+)\", (?<conflict_update>.+), (?<conflict_int>.+), \"(?<conflict_uuid>.+)\"\]/ + + param_logs = Regex.named_captures(param_regex, log) + + # Query parameters + assert param_logs["int"] == Integer.to_string(int) + assert param_logs["uuid"] == uuid + # Conflict query parameters + assert param_logs["conflict_update"] == Integer.to_string(conflict_update) + assert param_logs["conflict_int"] == Integer.to_string(conflict_int) + assert param_logs["conflict_uuid"] == conflict_uuid + end + + @tag :insert_select + @tag :with_conflict_target + test "for insert_all with entries conflict query" do + # Row 1 + int = 1 + uuid = Ecto.UUID.generate() + uuid_query = from l in Logging, where: l.int == ^int and l.uuid == ^uuid, select: l.uuid + now = NaiveDateTime.utc_now() |> 
NaiveDateTime.truncate(:second) + + row1 = [ + int: int, + uuid: uuid_query, + inserted_at: now, + updated_at: now + ] + + # Row 2 + int2 = 2 + uuid2 = Ecto.UUID.generate() + int_query = from l in Logging, where: l.int == ^int2 and l.uuid == ^uuid2, select: l.int + now2 = NaiveDateTime.utc_now() |> NaiveDateTime.truncate(:second) + + row2 = [ + int: int_query, + uuid: uuid2, + inserted_at: now2, + updated_at: now2 + ] + + # Conflict query + conflict_int = 0 + conflict_uuid = Ecto.UUID.generate() + conflict_update = 2 + + conflict_query = + from l in Logging, + where: l.int == ^conflict_int and l.uuid == ^conflict_uuid, + update: [set: [int: ^conflict_update]] + + # Ensure parameters are complete and in correct order + log = + capture_log(fn -> + TestRepo.insert_all(Logging, [row1, row2], + on_conflict: conflict_query, + conflict_target: :bid, + log: :info + ) + end) + + param_regex = + ~r/\[\"(?<bid1>.+)\", (?<inserted_at1>.+), (?<int1>.+), (?<updated_at1>.+), (?<uuid1_query_int>.+), \"(?<uuid1_query_uuid>.+)\", \"(?<bid2>.+)\", (?<inserted_at2>.+), (?<int2_query_int>.+), \"(?<int2_query_uuid>.+)\", (?<updated_at2>.+), \"(?<uuid2>.+)\", (?<conflict_update>.+), (?<conflict_int>.+), \"(?<conflict_uuid>.+)\"\]/ + + param_logs = Regex.named_captures(param_regex, log) + + # Autogenerated fields + assert param_logs["bid1"] =~ @uuid_regex + assert param_logs["bid2"] =~ @uuid_regex + # Row value parameters + assert param_logs["int1"] == Integer.to_string(int) + assert param_logs["inserted_at1"] == "~N[#{now2}]" + assert param_logs["updated_at1"] == "~N[#{now2}]" + assert param_logs["uuid1_query_int"] == Integer.to_string(int) + assert param_logs["uuid1_query_uuid"] == uuid + assert param_logs["int2_query_int"] == Integer.to_string(int2) + assert param_logs["int2_query_uuid"] == uuid2 + assert param_logs["inserted_at2"] == "~N[#{now2}]" + assert param_logs["updated_at2"] == "~N[#{now2}]" + assert param_logs["uuid2"] == uuid2 + # Conflict query parameters + assert param_logs["conflict_update"] == Integer.to_string(conflict_update) + assert param_logs["conflict_int"] == Integer.to_string(conflict_int) + assert param_logs["conflict_uuid"] == conflict_uuid + end + + @tag :insert_select + @tag :placeholders + @tag :with_conflict_target + test "for insert_all with entries, placeholders and conflict query" do + # Row 1 + int = 1 + uuid = Ecto.UUID.generate() + uuid_query = from l in Logging, where: l.int == ^int and l.uuid == ^uuid, select: l.uuid + + row1 = [ + int: int, + uuid: uuid_query, + inserted_at: {:placeholder, :now}, + updated_at: {:placeholder, :now2} + ] + + # Row 2 + int2 = 2 + uuid2 = Ecto.UUID.generate() + int_query = from l in Logging, where: l.int == ^int2 and l.uuid == ^uuid2, select: l.int + + row2 = [ + int: int_query, + uuid: uuid2, + inserted_at: {:placeholder, :now}, + updated_at: {:placeholder, :now2} + ] + + # Placeholders + now = NaiveDateTime.utc_now() |> NaiveDateTime.truncate(:second) + now2 = NaiveDateTime.utc_now() |> NaiveDateTime.truncate(:second) + placeholder_map = %{now: now, now2: now2} + + # Conflict query + conflict_int = 0 + conflict_uuid = Ecto.UUID.generate() + conflict_update = 2 + + conflict_query = + from l in Logging, + where: l.int == ^conflict_int and l.uuid == ^conflict_uuid, + update: [set: [int: ^conflict_update]] + + # Ensure parameters are complete and in correct order + log = + capture_log(fn -> + TestRepo.insert_all(Logging, [row1, row2], + placeholders: placeholder_map, + on_conflict: conflict_query, + conflict_target: :bid, + log: :info + ) + end) 
+ + param_regex = + ~r/\[(?<now_placeholder>.+), (?<now2_placeholder>.+), \"(?<bid1>.+)\", (?<int1>.+), (?<uuid1_query_int>.+), \"(?<uuid1_query_uuid>.+)\", \"(?<bid2>.+)\", (?<int2_query_int>.+), \"(?<int2_query_uuid>.+)\", \"(?<uuid2>.+)\", (?<conflict_update>.+), (?<conflict_int>.+), \"(?<conflict_uuid>.+)\"\]/ + + param_logs = Regex.named_captures(param_regex, log) + + # Placeholders + assert param_logs["now_placeholder"] == "~N[#{now}]" + assert param_logs["now2_placeholder"] == "~N[#{now2}]" + # Autogenerated fields + assert param_logs["bid1"] =~ @uuid_regex + assert param_logs["bid2"] =~ @uuid_regex + # Row value parameters + assert param_logs["int1"] == Integer.to_string(int) + assert param_logs["uuid1_query_int"] == Integer.to_string(int) + assert param_logs["uuid1_query_uuid"] == uuid + assert param_logs["int2_query_int"] == Integer.to_string(int2) + assert param_logs["int2_query_uuid"] == uuid2 + assert param_logs["uuid2"] == uuid2 + # Conflict query parameters + assert param_logs["conflict_update"] == Integer.to_string(conflict_update) + assert param_logs["conflict_int"] == Integer.to_string(conflict_int) + assert param_logs["conflict_uuid"] == conflict_uuid + end + + test "for insert" do + # Insert values + int = 1 + uuid = Ecto.UUID.generate() + + # Ensure parameters are complete and in correct order + log = + capture_log(fn -> + TestRepo.insert!(%Logging{uuid: uuid, int: int}, + log: :info + ) + end) + + param_regex = + ~r/\[(?<int>.+), \"(?<uuid>.+)\", (?<inserted_at>.+), (?<updated_at>.+), \"(?<bid>.+)\"\]/ + + param_logs = Regex.named_captures(param_regex, log) + + # User changes + assert param_logs["int"] == Integer.to_string(int) + assert param_logs["uuid"] == uuid + # Autogenerated changes + assert param_logs["inserted_at"] =~ @naive_datetime_regex + assert param_logs["updated_at"] =~ @naive_datetime_regex + # Filters + assert param_logs["bid"] =~ @uuid_regex + end + + @tag :with_conflict_target + test "for insert with conflict query" do + # Insert values + int = 1 + uuid = Ecto.UUID.generate() + + # Conflict query + conflict_int = 0 + conflict_uuid = Ecto.UUID.generate() + conflict_update = 2 + + conflict_query = + from l in Logging, + where: l.int == ^conflict_int and l.uuid == ^conflict_uuid, + update: [set: [int: ^conflict_update]] + + # Ensure parameters are complete and in correct order + log = + capture_log(fn -> + TestRepo.insert!(%Logging{uuid: uuid, int: int}, + on_conflict: conflict_query, + conflict_target: :bid, + log: :info + ) + end) + + param_regex = + ~r/\[(?<int>.+), \"(?<uuid>.+)\", (?<inserted_at>.+), (?<updated_at>.+), \"(?<bid>.+)\", (?<conflict_update>.+), (?<conflict_int>.+), \"(?<conflict_uuid>.+)\"\]/ + + param_logs = Regex.named_captures(param_regex, log) + + # User changes + assert param_logs["int"] == Integer.to_string(int) + assert param_logs["uuid"] == uuid + # Autogenerated changes + assert param_logs["inserted_at"] =~ @naive_datetime_regex + assert param_logs["updated_at"] =~ @naive_datetime_regex + # Filters + assert param_logs["bid"] =~ @uuid_regex + # Conflict query parameters + assert param_logs["conflict_update"] == Integer.to_string(conflict_update) + assert param_logs["conflict_int"] == Integer.to_string(conflict_int) + assert param_logs["conflict_uuid"] == conflict_uuid + end + + test "for update" do + # Update values + int = 1 + uuid = Ecto.UUID.generate() + current = TestRepo.insert!(%Logging{}) + + # Ensure parameters are complete and in correct order + log = + capture_log(fn -> + 
TestRepo.update!(Ecto.Changeset.change(current, %{uuid: uuid, int: int}), log: :info) + end) + + param_regex = ~r/\[(?<int>.+), \"(?<uuid>.+)\", (?<updated_at>.+), \"(?<bid>.+)\"\]/ + param_logs = Regex.named_captures(param_regex, log) + + # User changes + assert param_logs["int"] == Integer.to_string(int) + assert param_logs["uuid"] == uuid + # Autogenerated changes + assert param_logs["updated_at"] =~ @naive_datetime_regex + # Filters + assert param_logs["bid"] == current.bid + end + + test "for delete" do + current = TestRepo.insert!(%Logging{}) + + # Ensure parameters are complete and in correct order + log = + capture_log(fn -> + TestRepo.delete!(current, log: :info) + end) + + param_regex = ~r/\[\"(?<bid>.+)\"\]/ + param_logs = Regex.named_captures(param_regex, log) + + # Filters + assert param_logs["bid"] == current.bid + end + + test "for queries" do + int = 1 + uuid = Ecto.UUID.generate() + + # all + log = + capture_log(fn -> + TestRepo.all( + from(l in Logging, + select: type(^"1", :integer), + where: l.int == ^int and l.uuid == ^uuid + ), + log: :info + ) + end) + + param_regex = ~r/\[(?<tagged_int>.+), (?<int>.+), \"(?<uuid>.+)\"\]/ + param_logs = Regex.named_captures(param_regex, log) + + assert param_logs["tagged_int"] == Integer.to_string(int) + assert param_logs["int"] == Integer.to_string(int) + assert param_logs["uuid"] == uuid + + # update_all + update = 2 + + log = + capture_log(fn -> + from(l in Logging, + where: l.int == ^int and l.uuid == ^uuid, + update: [set: [int: ^update]] + ) + |> TestRepo.update_all([], log: :info) + end) + + param_regex = ~r/\[(?<update>.+), (?<int>.+), \"(?<uuid>.+)\"\]/ + param_logs = Regex.named_captures(param_regex, log) + + assert param_logs["update"] == Integer.to_string(update) + assert param_logs["int"] == Integer.to_string(int) + assert param_logs["uuid"] == uuid + + # delete_all + log = + capture_log(fn -> + TestRepo.delete_all(from(l in Logging, where: l.int == ^int and l.uuid == ^uuid), + log: :info + ) + end) + + param_regex = ~r/\[(?<int>.+), \"(?<uuid>.+)\"\]/ + param_logs = Regex.named_captures(param_regex, log) + + assert param_logs["int"] == Integer.to_string(int) + assert param_logs["uuid"] == uuid + end + + @tag :stream + test "for queries with stream" do + int = 1 + uuid = Ecto.UUID.generate() + + log = + capture_log(fn -> + stream = + TestRepo.stream(from(l in Logging, where: l.int == ^int and l.uuid == ^uuid), + log: :info + ) + + TestRepo.transaction(fn -> Enum.to_list(stream) end) + end) + + param_regex = ~r/\[(?<int>.+), \"(?<uuid>.+)\"\]/ + param_logs = Regex.named_captures(param_regex, log) + + assert param_logs["int"] == Integer.to_string(int) + assert param_logs["uuid"] == uuid + end + + @tag :array_type + test "for queries with array type" do + uuid = Ecto.UUID.generate() + uuid2 = Ecto.UUID.generate() + + log = + capture_log(fn -> + TestRepo.all(from(a in ArrayLogging, where: a.uuids == ^[uuid, uuid2]), + log: :info + ) + end) + + param_regex = ~r/\[(?<uuids>\[.+\])\]/ + param_logs = Regex.named_captures(param_regex, log) + + assert param_logs["uuids"] == "[\"#{uuid}\", \"#{uuid2}\"]" + end + end end diff --git a/.deps/ecto_sql/integration_test/sql/migrator.exs b/.deps/ecto_sql/integration_test/sql/migrator.exs @@ -89,6 +89,27 @@ defmodule Ecto.Integration.MigratorTest do assert down(PoolRepo, 33, AnotherSchemaMigration, log: false) == :ok end + test "ecto-generated migration queries pass schema_migration in telemetry options" do + handler = fn _event_name, _measurements, metadata -> + send(self(), metadata) + 
end
+
+    # migration table creation
+    Process.put(:telemetry, handler)
+    migrated_versions(PoolRepo, log: false)
+    assert_received %{options: [schema_migration: true]}
+
+    # transaction begin statement
+    Process.put(:telemetry, handler)
+    migrated_versions(PoolRepo, skip_table_creation: true, log: false)
+    assert_received %{options: [schema_migration: true]}
+
+    # retrieving the migration versions
+    Process.put(:telemetry, handler)
+    migrated_versions(PoolRepo, migration_lock: false, skip_table_creation: true, log: false)
+    assert_received %{options: [schema_migration: true]}
+  end
+
   test "bad execute migration" do
     assert catch_error(up(PoolRepo, 31, BadMigration, log: false))
   end
diff --git a/.deps/ecto_sql/integration_test/support/migration.exs b/.deps/ecto_sql/integration_test/support/migration.exs
@@ -100,6 +100,11 @@ defmodule Ecto.Integration.Migration do
       add :uuids, {:array, :uuid}, default: []
       add :items, {:array, :map}
     end
+
+    create table(:array_loggings) do
+      add :uuids, {:array, :uuid}, default: []
+      timestamps()
+    end
   end
 
   create table(:composite_pk, primary_key: false) do
@@ -128,5 +133,12 @@ defmodule Ecto.Integration.Migration do
     create table(:bits) do
       add :bit, :bit
     end
+
+    create table(:loggings, primary_key: false) do
+      add :bid, :binary_id, primary_key: true
+      add :int, :integer
+      add :uuid, :uuid
+      timestamps()
+    end
   end
 end
diff --git a/.deps/ecto_sql/lib/ecto/adapter/structure.ex b/.deps/ecto_sql/lib/ecto/adapter/structure.ex
@@ -38,4 +38,25 @@ defmodule Ecto.Adapter.Structure do
   """
   @callback structure_load(default :: String.t, config :: Keyword.t) ::
               {:ok, String.t} | {:error, term}
+
+  @doc """
+  Runs the dump command for the given repo / config.
+
+  Calling this function will set up authentication and run the dump CLI
+  command with your provided `args`.
+
+  The options in `opts` are passed directly to `System.cmd/3`.
+
+  Returns `{output, exit_status}` where `output` is a string of the stdout
+  (as long as no option `into` is provided, see `System.cmd/3`) and `exit_status`
+  is the exit status of the invocation.
(`0` for success) + + ## Examples + + iex> dump_cmd(["--data-only", "--table", "table_name"], [stdout_to_stderr: true], Acme.Repo.config()) + "--\n-- PostgreSQL database dump\n--\n" <> _rest + + """ + @callback dump_cmd(args :: [String.t()], opts :: Keyword.t(), config :: Keyword.t()) :: + {output :: Collectable.t(), exit_status :: non_neg_integer()} end diff --git a/.deps/ecto_sql/lib/ecto/adapters/mysql.ex b/.deps/ecto_sql/lib/ecto/adapters/mysql.ex @@ -1,23 +0,0 @@ -defmodule Ecto.Adapters.MySQL do - @moduledoc false - - @behaviour Ecto.Adapter - - defp error!() do - raise "Ecto.Adapters.MySQL is obsolete, use Ecto.Adapters.MyXQL instead" - end - - defmacro __before_compile__(_env), do: error!() - - def ensure_all_started(_, _), do: error!() - - def init(_), do: error!() - - def checkout(_, _, _), do: error!() - - def checked_out?(_), do: error!() - - def loaders(_, _), do: error!() - - def dumpers(_, _), do: error!() -end diff --git a/.deps/ecto_sql/lib/ecto/adapters/myxql.ex b/.deps/ecto_sql/lib/ecto/adapters/myxql.ex @@ -244,7 +244,7 @@ defmodule Ecto.Adapters.MyXQL do Ecto.Adapters.SQL.raise_migration_pool_size_error() end - opts = Keyword.put(opts, :timeout, :infinity) + opts = Keyword.merge(opts, [timeout: :infinity, telemetry_options: [schema_migration: true]]) {:ok, result} = transaction(meta, opts, fn -> @@ -344,10 +344,7 @@ defmodule Ecto.Adapters.MyXQL do def structure_load(default, config) do path = config[:dump_path] || Path.join(default, "structure.sql") - args = [ - "--execute", "SET FOREIGN_KEY_CHECKS = 0; SOURCE #{path}; SET FOREIGN_KEY_CHECKS = 1", - "--database", config[:database] - ] + args = ["--execute", "SET FOREIGN_KEY_CHECKS = 0; SOURCE #{path}; SET FOREIGN_KEY_CHECKS = 1"] case run_with_cmd("mysql", config, args) do {_output, 0} -> {:ok, path} @@ -355,6 +352,10 @@ defmodule Ecto.Adapters.MyXQL do end end + @impl true + def dump_cmd(args, opts \\ [], config) when is_list(config) and is_list(args), + do: run_with_cmd("mysqldump", config, args, opts) + ## Helpers defp run_query(sql, opts) do @@ -395,7 +396,7 @@ defmodule Ecto.Adapters.MyXQL do defp exit_to_exception(reason), do: RuntimeError.exception(Exception.format_exit(reason)) - defp run_with_cmd(cmd, opts, opt_args) do + defp run_with_cmd(cmd, opts, opt_args, cmd_opts \\ []) do unless System.find_executable(cmd) do raise "could not find executable `#{cmd}` in path, " <> "please guarantee it is available before running ecto commands" @@ -419,13 +420,26 @@ defmodule Ecto.Adapters.MyXQL do [] end + database_args = + if database = opts[:database] do + ["--database", database] + else + [] + end + args = [ "--host", host, "--port", to_string(port), "--protocol", protocol - ] ++ user_args ++ opt_args + ] ++ user_args ++ database_args ++ opt_args + + cmd_opts = + cmd_opts + |> Keyword.put_new(:stderr_to_stdout, true) + |> Keyword.update(:env, env, &Enum.concat(env, &1)) + - System.cmd(cmd, args, env: env, stderr_to_stdout: true) + System.cmd(cmd, args, cmd_opts) end end diff --git a/.deps/ecto_sql/lib/ecto/adapters/myxql/connection.ex b/.deps/ecto_sql/lib/ecto/adapters/myxql/connection.ex @@ -271,8 +271,11 @@ if Code.ensure_loaded?(MyXQL) do intersperse_map(fields, ", ", fn {:&, _, [idx]} -> case elem(sources, idx) do + {nil, source, nil} -> + error!(query, "MySQL adapter does not support selecting all fields from fragment #{source}. " <> + "Please specify exactly which fields you want to select") {source, _, nil} -> - error!(query, "MySQL does not support selecting all fields from #{source} without a schema. 
" <> + error!(query, "MySQL adapter does not support selecting all fields from #{source} without a schema. " <> "Please specify a schema or specify exactly which fields you want to select") {_, source, _} -> source @@ -572,6 +575,10 @@ if Code.ensure_loaded?(MyXQL) do quote_name(literal) end + defp expr({:selected_as, _, [name]}, _sources, _query) do + [quote_name(name)] + end + defp expr({:datetime_add, _, [datetime, count, interval]}, sources, query) do ["date_add(", expr(datetime, sources, query), ", ", interval(count, interval, sources, query) | ")"] @@ -769,6 +776,10 @@ if Code.ensure_loaded?(MyXQL) do error!(nil, "MySQL adapter does not support where in indexes") end + if index.nulls_distinct == false do + error!(nil, "MySQL adapter does not support nulls_distinct set to false in indexes") + end + [["CREATE", if_do(index.unique, " UNIQUE"), " INDEX ", quote_name(index.name), " ON ", diff --git a/.deps/ecto_sql/lib/ecto/adapters/postgres.ex b/.deps/ecto_sql/lib/ecto/adapters/postgres.ex @@ -25,6 +25,27 @@ defmodule Ecto.Adapters.Postgres do YourApp.Repo.all(Queryable, prepare: :unnamed) + ### Migration options + + * `:migration_lock` - prevent multiple nodes from running migrations at the same + time by obtaining a lock. The value `:table_lock` will lock migrations by wrapping + the entire migration inside a database transaction, including inserting the + migration version into the migration source (by default, "schema_migrations"). + You may alternatively select `:pg_advisory_lock` which has the benefit + of allowing concurrent operations such as creating indexes. (default: `:table_lock`) + + When using the `:pg_advisory_lock` migration lock strategy and Ecto cannot obtain + the lock due to another instance occupying the lock, Ecto will wait for 5 seconds + and then retry infinity times. This is configurable on the repo with keys + `:migration_advisory_lock_retry_interval_ms` and `:migration_advisory_lock_max_tries`. + If the retries are exhausted, the migration will fail. + + Some downsides to using advisory locks is that some Postgres-compatible systems or plugins + may not support session level locks well and therefore result in inconsistent behavior. + For example, PgBouncer when using pool_modes other than session won't work well with + advisory locks. CockroachDB is another system that is designed in a way that advisory + locks don't make sense for their distributed database. 
+ ### Connection options * `:hostname` - Server hostname @@ -105,6 +126,8 @@ defmodule Ecto.Adapters.Postgres do # Inherit all behaviour from Ecto.Adapters.SQL use Ecto.Adapters.SQL, driver: :postgrex + require Logger + # And provide a custom storage implementation @behaviour Ecto.Adapter.Storage @behaviour Ecto.Adapter.Structure @@ -225,15 +248,32 @@ defmodule Ecto.Adapters.Postgres do @impl true def lock_for_migrations(meta, opts, fun) do - %{opts: adapter_opts} = meta + %{opts: adapter_opts, repo: repo} = meta if Keyword.fetch(adapter_opts, :pool_size) == {:ok, 1} do Ecto.Adapters.SQL.raise_migration_pool_size_error() end - opts = Keyword.put(opts, :timeout, :infinity) + opts = Keyword.merge(opts, [timeout: :infinity, telemetry_options: [schema_migration: true]]) + config = repo.config() + lock_strategy = Keyword.get(config, :migration_lock, :table_lock) + do_lock_for_migrations(lock_strategy, meta, opts, config, fun) + end + + defp do_lock_for_migrations(:pg_advisory_lock, meta, opts, config, fun) do + lock = :erlang.phash2({:ecto, opts[:prefix], meta.repo}) - {:ok, result} = + retry_state = %{ + retry_interval_ms: config[:migration_advisory_lock_retry_interval_ms] || 5000, + max_tries: config[:migration_advisory_lock_max_tries] || :infinity, + tries: 0 + } + + advisory_lock(meta, opts, lock, retry_state, fun) + end + + defp do_lock_for_migrations(:table_lock, meta, opts, _config, fun) do + {:ok, res} = transaction(meta, opts, fn -> # SHARE UPDATE EXCLUSIVE MODE is the first lock that locks # itself but still allows updates to happen, see @@ -244,7 +284,55 @@ defmodule Ecto.Adapters.Postgres do fun.() end) - result + res + end + + defp advisory_lock(meta, opts, lock, retry_state, fun) do + result = checkout(meta, opts, fn -> + case Ecto.Adapters.SQL.query(meta, "SELECT pg_try_advisory_lock(#{lock})", [], opts) do + {:ok, %{rows: [[true]]}} -> + try do + {:ok, fun.()} + after + release_advisory_lock(meta, opts, lock) + end + _ -> + :no_advisory_lock + end + end) + + case result do + {:ok, fun_result} -> + fun_result + + :no_advisory_lock -> + maybe_retry_advisory_lock(meta, opts, lock, retry_state, fun) + end + end + + defp release_advisory_lock(meta, opts, lock) do + case Ecto.Adapters.SQL.query(meta, "SELECT pg_advisory_unlock(#{lock})", [], opts) do + {:ok, %{rows: [[true]]}} -> + :ok + _ -> + raise "failed to release advisory lock" + end + end + + defp maybe_retry_advisory_lock(meta, opts, lock, retry_state, fun) do + %{retry_interval_ms: interval, max_tries: max_tries, tries: tries} = retry_state + + if max_tries != :infinity && max_tries <= tries do + raise "failed to obtain advisory lock. Tried #{max_tries} times waiting #{interval}ms between tries" + else + if Keyword.get(opts, :log_migrator_sql, false) do + Logger.info("Migration lock occupied for #{inspect(meta.repo)}. 
Retry #{tries + 1}/#{max_tries} at #{interval}ms intervals.") + end + + Process.sleep(interval) + retry_state = %{retry_state | tries: tries + 1} + advisory_lock(meta, opts, lock, retry_state, fun) + end end @impl true @@ -267,8 +355,7 @@ defmodule Ecto.Adapters.Postgres do path = config[:dump_path] || Path.join(default, "structure.sql") File.mkdir_p!(Path.dirname(path)) - case run_with_cmd("pg_dump", config, ["--file", path, "--schema-only", "--no-acl", - "--no-owner", config[:database]]) do + case run_with_cmd("pg_dump", config, ["--file", path, "--schema-only", "--no-acl", "--no-owner"]) do {_output, 0} -> {:ok, path} {output, _} -> @@ -293,14 +380,17 @@ defmodule Ecto.Adapters.Postgres do @impl true def structure_load(default, config) do path = config[:dump_path] || Path.join(default, "structure.sql") - args = ["--quiet", "--file", path, "-vON_ERROR_STOP=1", - "--single-transaction", config[:database]] + args = ["--quiet", "--file", path, "-vON_ERROR_STOP=1", "--single-transaction"] case run_with_cmd("psql", config, args) do {_output, 0} -> {:ok, path} {output, _} -> {:error, output} end end + @impl true + def dump_cmd(args, opts \\ [], config) when is_list(config) and is_list(args), + do: run_with_cmd("pg_dump", config, args, opts) + ## Helpers defp run_query(sql, opts) do @@ -338,7 +428,7 @@ defmodule Ecto.Adapters.Postgres do end end - defp run_with_cmd(cmd, opts, opt_args) do + defp run_with_cmd(cmd, opts, opt_args, cmd_opts \\ []) do unless System.find_executable(cmd) do raise "could not find executable `#{cmd}` in path, " <> "please guarantee it is available before running ecto commands" @@ -356,9 +446,11 @@ defmodule Ecto.Adapters.Postgres do args = [] args = - if username = opts[:username], do: ["-U", username|args], else: args + if username = opts[:username], do: ["--username", username | args], else: args args = - if port = opts[:port], do: ["-p", to_string(port)|args], else: args + if port = opts[:port], do: ["--port", to_string(port) | args], else: args + args = + if database = opts[:database], do: ["--dbname", database | args], else: args host = opts[:socket_dir] || opts[:hostname] || System.get_env("PGHOST") || "localhost" @@ -369,8 +461,14 @@ defmodule Ecto.Adapters.Postgres do ) end - args = ["--host", host|args] + args = ["--host", host | args] args = args ++ opt_args - System.cmd(cmd, args, env: env, stderr_to_stdout: true) + + cmd_opts = + cmd_opts + |> Keyword.put_new(:stderr_to_stdout, true) + |> Keyword.update(:env, env, &Enum.concat(env, &1)) + + System.cmd(cmd, args, cmd_opts) end end diff --git a/.deps/ecto_sql/lib/ecto/adapters/postgres/connection.ex b/.deps/ecto_sql/lib/ecto/adapters/postgres/connection.ex @@ -366,8 +366,11 @@ if Code.ensure_loaded?(Postgrex) do intersperse_map(fields, ", ", fn {:&, _, [idx]} -> case elem(sources, idx) do + {nil, source, nil} -> + error!(query, "PostgreSQL adapter does not support selecting all fields from fragment #{source}. " <> + "Please specify exactly which fields you want to select") {source, _, nil} -> - error!(query, "PostgreSQL does not support selecting all fields from #{source} without a schema. " <> + error!(query, "PostgreSQL adapter does not support selecting all fields from #{source} without a schema. 
" <> "Please specify a schema or specify exactly which fields you want to select") {_, source, _} -> source @@ -676,6 +679,10 @@ if Code.ensure_loaded?(Postgrex) do quote_name(literal) end + defp expr({:selected_as, _, [name]}, _sources, _query) do + [quote_name(name)] + end + defp expr({:datetime_add, _, [datetime, count, interval]}, sources, query) do [expr(datetime, sources, query), type_unless_typed(datetime, "timestamp"), " + ", interval(count, interval, sources, query)] @@ -904,6 +911,13 @@ if Code.ensure_loaded?(Postgrex) do fields = intersperse_map(index.columns, ", ", &index_expr/1) include_fields = intersperse_map(index.include, ", ", &index_expr/1) + maybe_nulls_distinct = + case index.nulls_distinct do + nil -> [] + true -> " NULLS DISTINCT" + false -> " NULLS NOT DISTINCT" + end + queries = [["CREATE ", if_do(index.unique, "UNIQUE "), "INDEX ", @@ -915,6 +929,7 @@ if Code.ensure_loaded?(Postgrex) do if_do(index.using, [" USING " , to_string(index.using)]), ?\s, ?(, fields, ?), if_do(include_fields != [], [" INCLUDE ", ?(, include_fields, ?)]), + maybe_nulls_distinct, if_do(index.where, [" WHERE ", to_string(index.where)])]] queries ++ comments_on("INDEX", quote_table(index.prefix, index.name), index.comment) diff --git a/.deps/ecto_sql/lib/ecto/adapters/sql.ex b/.deps/ecto_sql/lib/ecto/adapters/sql.ex @@ -39,7 +39,7 @@ defmodule Ecto.Adapters.SQL do Generally speaking, you must invoke those functions directly from your repository, for example: `MyApp.Repo.query("SELECT true")`. - You can also invoke them direcltly from `Ecto.Adapters.SQL`, but + You can also invoke them directly from `Ecto.Adapters.SQL`, but keep in mind that in such cases features such as "dynamic repositories" won't be available. @@ -192,14 +192,14 @@ defmodule Ecto.Adapters.SQL do @impl true def update(adapter_meta, %{source: source, prefix: prefix}, fields, params, returning, opts) do {fields, field_values} = :lists.unzip(fields) - filter_values = params |> Keyword.values() |> Enum.reject(&is_nil(&1)) + filter_values = Keyword.values(params) sql = @conn.update(prefix, source, fields, params, returning) Ecto.Adapters.SQL.struct(adapter_meta, @conn, sql, :update, source, params, field_values ++ filter_values, :raise, returning, opts) end @impl true def delete(adapter_meta, %{source: source, prefix: prefix}, params, opts) do - filter_values = params |> Keyword.values() |> Enum.reject(&is_nil(&1)) + filter_values = Keyword.values(params) sql = @conn.delete(prefix, source, params, []) Ecto.Adapters.SQL.struct(adapter_meta, @conn, sql, :delete, source, params, filter_values, :raise, [], opts) end @@ -315,9 +315,6 @@ defmodule Ecto.Adapters.SQL do _Postgrex_: Check [PostgreSQL doc](https://www.postgresql.org/docs/current/sql-explain.html) for version compatibility. - _MyXQL_: `EXTENDED` and `PARTITIONS` opts were [deprecated](https://dev.mysql.com/doc/refman/5.7/en/explain.html) - and are enabled by default. - Also note that: * Currently `:map`, `:yaml`, and `:text` format options are supported @@ -634,31 +631,7 @@ defmodule Ecto.Adapters.SQL do ## Callbacks @doc false - def __before_compile__(driver, _env) do - case Application.get_env(:ecto, :json_library) do - nil -> - :ok - - Jason -> - IO.warn """ - Jason is the default :json_library in Ecto 3.0. - You no longer need to configure it explicitly, - please remove this line from your config files: - - config :ecto, :json_library, Jason - - """ - - value -> - IO.warn """ - The :json_library configuration for the :ecto application is deprecated. 
- Please configure the :json_library in the driver instead: - - config #{inspect driver}, :json_library, #{inspect value} - - """ - end - + def __before_compile__(_driver, _env) do quote do @doc """ A convenience function for SQL-based repositories that executes the given query. @@ -1071,12 +1044,6 @@ defmodule Ecto.Adapters.SQL do result = with {:ok, _query, res} <- result, do: {:ok, res} stacktrace = Keyword.get(opts, :stacktrace) - params = - Enum.map(params, fn - %Ecto.Query.Tagged{value: value} -> value - value -> value - end) - acc = if idle_time, do: [idle_time: idle_time], else: [] @@ -1106,7 +1073,7 @@ defmodule Ecto.Adapters.SQL do true -> Logger.log( log, - fn -> log_iodata(measurements, repo, source, query, params, result, stacktrace) end, + fn -> log_iodata(measurements, repo, source, query, opts[:cast_params] || params, result, stacktrace) end, ansi_color: sql_color(query) ) @@ -1116,7 +1083,7 @@ defmodule Ecto.Adapters.SQL do level -> Logger.log( level, - fn -> log_iodata(measurements, repo, source, query, params, result, stacktrace) end, + fn -> log_iodata(measurements, repo, source, query, opts[:cast_params] || params, result, stacktrace) end, ansi_color: sql_color(query) ) end @@ -1178,11 +1145,12 @@ defmodule Ecto.Adapters.SQL do with [_ | _] <- stacktrace, {module, function, arity, info} <- last_non_ecto(Enum.reverse(stacktrace), repo, nil) do [ - IO.ANSI.light_black(), ?\n, + IO.ANSI.light_black(), "↳ ", Exception.format_mfa(module, function, arity), - log_stacktrace_info(info) + log_stacktrace_info(info), + IO.ANSI.reset(), ] else _ -> [] diff --git a/.deps/ecto_sql/lib/ecto/adapters/sql/sandbox.ex b/.deps/ecto_sql/lib/ecto/adapters/sql/sandbox.ex @@ -444,7 +444,7 @@ defmodule Ecto.Adapters.SQL.Sandbox do * `{:shared, pid}` - after checking out a connection in manual mode, you can change the mode to `{:shared, pid}`, where pid is the process that owns the connection, most often `{:shared, self()}`. This makes it - so all processes can use the same connection as the one owner by the + so all processes can use the same connection as the one owned by the current process. This is the mode you will run your sync tests in Whenever you change the mode to `:manual` or `:auto`, all existing diff --git a/.deps/ecto_sql/lib/ecto/adapters/tds.ex b/.deps/ecto_sql/lib/ecto/adapters/tds.ex @@ -284,7 +284,7 @@ defmodule Ecto.Adapters.Tds do Ecto.Adapters.SQL.raise_migration_pool_size_error() end - opts = Keyword.put(opts, :timeout, :infinity) + opts = Keyword.merge(opts, [timeout: :infinity, telemetry_options: [schema_migration: true]]) {:ok, result} = transaction(meta, opts, fn -> diff --git a/.deps/ecto_sql/lib/ecto/adapters/tds/connection.ex b/.deps/ecto_sql/lib/ecto/adapters/tds/connection.ex @@ -395,12 +395,13 @@ if Code.ensure_loaded?(Tds) do intersperse_map(fields, ", ", fn {:&, _, [idx]} -> case elem(sources, idx) do + {nil, source, nil} -> + error!(query, "Tds adapter does not support selecting all fields from fragment #{source}. " <> + "Please specify exactly which fields you want to select") + {source, _, nil} -> - error!( - query, - "Tds adapter does not support selecting all fields from #{source} without a schema. " <> - "Please specify a schema or specify exactly which fields you want in projection" - ) + error!(query, "Tds adapter does not support selecting all fields from #{source} without a schema. 
" <> + "Please specify a schema or specify exactly which fields you want in projection") {_, source, _} -> source @@ -764,6 +765,10 @@ if Code.ensure_loaded?(Tds) do quote_name(literal) end + defp expr({:selected_as, _, [name]}, _sources, _query) do + [quote_name(name)] + end + defp expr({:datetime_add, _, [datetime, count, interval]}, sources, query) do [ "DATEADD(", @@ -1065,6 +1070,10 @@ if Code.ensure_loaded?(Tds) do error!(nil, "MSSQL does not support `using` in indexes") end + if index.nulls_distinct == true do + error!(nil, "MSSQL does not support nulls_distinct set to true in indexes") + end + with_options = if index.concurrently or index.options != nil do [ diff --git a/.deps/ecto_sql/lib/ecto/migration.ex b/.deps/ecto_sql/lib/ecto/migration.ex @@ -180,24 +180,11 @@ defmodule Ecto.Migration do To avoid that we recommend to use `execute/2` with anonymous functions instead. For more information and example usage please take a look at `execute/2` function. - ## Comments - - Migrations where you create or alter a table support specifying table - and column comments. The same can be done when creating constraints - and indexes. Not all databases support this feature. - - def up do - create index("posts", [:name], comment: "Index Comment") - create constraint("products", "price_must_be_positive", check: "price > 0", comment: "Constraint Comment") - create table("weather", prefix: "north_america", comment: "Table Comment") do - add :city, :string, size: 40, comment: "Column Comment" - timestamps() - end - end - ## Repo configuration - The following migration configuration options are available for a given repository: + ### Migrator configuration + + These options configure how the underlying migration engine works: * `:migration_source` - Version numbers of migrations will be saved in a table named `schema_migrations` by default. You can configure the name of @@ -205,6 +192,39 @@ defmodule Ecto.Migration do config :app, App.Repo, migration_source: "my_migrations" + * `:migration_lock` - By default, Ecto will lock the migration source to throttle + multiple nodes to run migrations one at a time. You can disable the `migration_lock` + by setting it to `false`. You may also select a different locking strategy if + supported by the adapter. See the adapter docs for more information. + + config :app, App.Repo, migration_lock: false + + # Or use a different locking strategy. For example, Postgres can use advisory + # locks but be aware that your database configuration might not make this a good + # fit. See the Ecto.Adapters.Postgres for more information: + config :app, App.Repo, migration_lock: :pg_advisory_lock + + * `:migration_repo` - The migration repository is where the table managing the + migrations will be stored (`migration_source` defines the table name). It defaults + to the given repository itself but you can configure it via: + + config :app, App.Repo, migration_repo: App.MigrationRepo + + * `:priv` - the priv directory for the repo with the location of important assets, + such as migrations. For a repository named `MyApp.FooRepo`, `:priv` defaults to + "priv/foo_repo" and migrations should be placed at "priv/foo_repo/migrations" + + * `:start_apps_before_migration` - A list of applications to be started before + running migrations. Used by `Ecto.Migrator.with_repo/3` and the migration tasks: + + config :app, App.Repo, start_apps_before_migration: [:ssl, :some_custom_logger] + + ### Migrations configuration + + These options configure how each migration works. 
**It is generally discouraged + to change any of those configurations after your database is deployed to production, + as changing these options will retroactively change how all migrations work**. + * `:migration_primary_key` - By default, Ecto uses the `:id` column with type `:bigserial`, but you can configure it via: @@ -212,8 +232,8 @@ defmodule Ecto.Migration do config :app, App.Repo, migration_primary_key: false - * `:migration_foreign_key` - By default, Ecto uses the migration_primary_key type - for foreign keys when references/2 is used, but you can configure it via: + * `:migration_foreign_key` - By default, Ecto uses the `primary_key` type + for foreign keys when `references/2` is used, but you can configure it via: config :app, App.Repo, migration_foreign_key: [column: :uuid, type: :binary_id] @@ -228,31 +248,25 @@ defmodule Ecto.Migration do updated_at: :changed_at ] - * `:migration_lock` - By default, Ecto will lock the migration table. This allows - multiple nodes to attempt to run migrations at the same time but only one will - succeed. You can disable the `migration_lock` by setting it to `false` - - config :app, App.Repo, migration_lock: false - * `:migration_default_prefix` - Ecto defaults to `nil` for the database prefix for migrations, but you can configure it via: config :app, App.Repo, migration_default_prefix: "my_prefix" - * `:migration_repo` - The migration repository is where the table managing the - migrations will be stored (`migration_source` defines the table name). It defaults - to the given repository itself but you can configure it via: - - config :app, App.Repo, migration_repo: App.MigrationRepo - - * `:priv` - the priv directory for the repo with the location of important assets, - such as migrations. For a repository named `MyApp.FooRepo`, `:priv` defaults to - "priv/foo_repo" and migrations should be placed at "priv/foo_repo/migrations" + ## Comments - * `:start_apps_before_migration` - A list of applications to be started before - running migrations. Used by `Ecto.Migrator.with_repo/3` and the migration tasks: + Migrations where you create or alter a table support specifying table + and column comments. The same can be done when creating constraints + and indexes. Not all databases support this feature. - config :app, App.Repo, start_apps_before_migration: [:ssl, :some_custom_logger] + def up do + create index("posts", [:name], comment: "Index Comment") + create constraint("products", "price_must_be_positive", check: "price > 0", comment: "Constraint Comment") + create table("weather", prefix: "north_america", comment: "Table Comment") do + add :city, :string, size: 40, comment: "Column Comment" + timestamps() + end + end ## Prefixes @@ -314,6 +328,11 @@ defmodule Ecto.Migration do Then in your migrations you can `use MyApp.Migration` to share this behavior among all your migrations. + + ## Additional resources + + * The [Safe Ecto Migrations guide](https://fly.io/phoenix-files/safe-ecto-migrations/) + """ @doc """ @@ -347,6 +366,7 @@ defmodule Ecto.Migration do concurrently: false, using: nil, include: [], + nulls_distinct: nil, where: nil, comment: nil, options: nil @@ -360,6 +380,7 @@ defmodule Ecto.Migration do concurrently: boolean, using: atom | String.t, include: [atom | String.t], + nulls_distinct: boolean | nil, where: atom | String.t, comment: String.t | nil, options: String.t @@ -373,7 +394,7 @@ defmodule Ecto.Migration do To define a table in a migration, see `Ecto.Migration.table/2`. 
""" defstruct name: nil, prefix: nil, comment: nil, primary_key: true, engine: nil, options: nil - @type t :: %__MODULE__{name: String.t, prefix: atom | nil, comment: String.t | nil, primary_key: boolean, + @type t :: %__MODULE__{name: String.t, prefix: atom | nil, comment: String.t | nil, primary_key: boolean | keyword(), engine: atom, options: String.t} end @@ -474,7 +495,7 @@ defmodule Ecto.Migration do table = %Table{} = unquote(object) Runner.start_command({unquote(command), Ecto.Migration.__prefix__(table)}) - if primary_key = table.primary_key && Ecto.Migration.__primary_key__() do + if primary_key = Ecto.Migration.__primary_key__(table) do {name, type, opts} = primary_key add(name, type, opts) end @@ -559,7 +580,7 @@ defmodule Ecto.Migration do defp do_create(table, command) do columns = - if primary_key = table.primary_key && Ecto.Migration.__primary_key__() do + if primary_key = Ecto.Migration.__primary_key__(table) do {name, type, opts} = primary_key [{:add, name, type, opts}] else @@ -642,7 +663,10 @@ defmodule Ecto.Migration do ## Options * `:primary_key` - when `false`, a primary key field is not generated on table - creation. + creation. Alternatively, a keyword list in the same style of the + `:migration_primary_key` repository configuration can be supplied + to control the generation of the primary key field. The keyword list + must include `:name` and `:type`. See `add/3` for further options. * `:engine` - customizes the table storage for supported databases. For MySQL, the default is InnoDB. * `:prefix` - the prefix for the table. This prefix will automatically be used @@ -683,6 +707,12 @@ defmodule Ecto.Migration do * `:include` - specify fields for a covering index. This is not supported by all databases. For more information on PostgreSQL support, please [read the official docs](https://www.postgresql.org/docs/current/indexes-index-only-scans.html). + * `:nulls_distinct` - specify whether null values should be considered + distinct for a unique index. Defaults to `nil`, which will not add the + parameter to the generated SQL and thus use the database default. + This option is currently only supported by PostgreSQL 15+. + For MySQL, it is always false. For MSSQL, it is always true. + See the dedicated section on this option for more information. * `:comment` - adds a comment to the index. ## Adding/dropping indexes concurrently @@ -692,29 +722,33 @@ defmodule Ecto.Migration do However, this feature does not work well with the transactions used by Ecto to guarantee integrity during migrations. - Therefore, to migrate indexes concurrently, you need to set - both `@disable_ddl_transaction` and `@disable_migration_lock` to true: + You can address this with two changes: + + 1. Change your repository to use PG advisory locks as the migration lock. + Note this may not be supported by Postgres-like databases and proxies. + + 2. Disable DDL transactions. Doing this removes the guarantee that all of + the changes in the migration will happen at once, so you will want to + keep it short. + If the database adapter supports several migration lock strategies, such as + Postgrex, then review those strategies and consider using a strategy that + utilizes advisory locks to faciliate running migrations one at a time even + across multiple nodes. 
For example: + + # Config the Repo (PostgreSQL example) + config MyApp.Repo, migration_lock: :pg_advisory_lock + + # Migrate with your concurrent operation defmodule MyRepo.Migrations.CreateIndexes do use Ecto.Migration @disable_ddl_transaction true - @disable_migration_lock true def change do create index("posts", [:slug], concurrently: true) end end - Disabling DDL transactions removes the guarantee that all of the changes - in the migration will happen at once. Disabling the migration lock removes - the guarantee only a single node will run a given migration if multiple - nodes are attempting to migrate at the same time. - - Since running migrations outside a transaction and without locks can be - dangerous, consider performing very few operations in migrations that add - concurrent indexes. We recommend to run migrations with concurrent indexes - in isolation and disable those features only temporarily. - ## Index types When creating an index, the index type can be specified with the `:using` @@ -737,6 +771,29 @@ defmodule Ecto.Migration do More information on partial indexes can be found in the [PostgreSQL docs](http://www.postgresql.org/docs/current/indexes-partial.html). + ## The `:nulls_distinct` option + + A unique index does not prevent multiple null values by default in most databases. + + For example, imagine we have a "products" table and need to guarantee that + sku's are unique within their category, but the category is optional. + Creating a regular unique index over the sku and category_id fields with: + + create index("products", [:sku, :category_id], unique: true) + + will allow products with the same sku to be inserted if their category_id is `nil`. + The `:nulls_distinct` option can be used to change this behavior by considering + null values as equal, i.e. not distinct: + + create index("products", [:sku, :category_id], unique: true, nulls_distinct: false) + + This option is currently only supported by PostgreSQL 15+. 
+ As a workaround for older PostgreSQL versions and other databases, an + additional partial unique index for the sku can be created: + + create index("products", [:sku, :category_id], unique: true) + create index("products", [:sku], unique: true, where: "category_id IS NULL") + ## Examples # With no name provided, the name of the below index defaults to @@ -1317,6 +1374,10 @@ defmodule Ecto.Migration do end defp validate_index_opts!(opts) when is_list(opts) do + if opts[:nulls_distinct] != nil and opts[:unique] != true do + raise ArgumentError, "the `nulls_distinct` option can only be used with unique indexes" + end + case Keyword.get_values(opts, :where) do [_, _ | _] -> raise ArgumentError, @@ -1353,16 +1414,27 @@ defmodule Ecto.Migration do end @doc false - def __primary_key__() do - case Runner.repo_config(:migration_primary_key, []) do - false -> - false - - opts when is_list(opts) -> - opts = Keyword.put(opts, :primary_key, true) - {name, opts} = Keyword.pop(opts, :name, :id) - {type, opts} = Keyword.pop(opts, :type, :bigserial) - {name, type, opts} + def __primary_key__(table) do + case table.primary_key do + false -> false + + true -> + case Runner.repo_config(:migration_primary_key, []) do + false -> false + opts when is_list(opts) -> pk_opts_to_tuple(opts) + end + + opts when is_list(opts) -> pk_opts_to_tuple(opts) + + _ -> + raise ArgumentError, ":primary_key option must be either a boolean or a keyword list of options" end end + + defp pk_opts_to_tuple(opts) do + opts = Keyword.put(opts, :primary_key, true) + {name, opts} = Keyword.pop(opts, :name, :id) + {type, opts} = Keyword.pop(opts, :type, :bigserial) + {name, type, opts} + end end diff --git a/.deps/ecto_sql/lib/ecto/migration/runner.ex b/.deps/ecto_sql/lib/ecto/migration/runner.ex @@ -306,34 +306,8 @@ defmodule Ecto.Migration.Runner do end defp log_and_execute_ddl(repo, migration, log, {instruction, %Index{} = index}) do - if index.concurrently do - migration_config = migration.__migration__() - - if not migration_config[:disable_ddl_transaction] do - IO.warn """ - Migration #{inspect(migration)} has set index `#{index.name}` on table \ - `#{index.table}` to concurrently but did not disable ddl transaction. \ - Please set: - - use Ecto.Migration - @disable_ddl_transaction true - - """, [] - end - - if not migration_config[:disable_migration_lock] do - IO.warn """ - Migration #{inspect(migration)} has set index `#{index.name}` on table \ - `#{index.table}` to concurrently but did not disable migration lock. \ - Please set: - - use Ecto.Migration - @disable_migration_lock true - - """, [] - end - end - + maybe_warn_index_ddl_transaction(index, migration) + maybe_warn_index_migration_lock(index, repo, migration) log_and_execute_ddl(repo, log, {instruction, index}) end @@ -363,6 +337,62 @@ defmodule Ecto.Migration.Runner do defp log(true, msg, metadata), do: Logger.log(:info, msg, metadata) defp log(level, msg, metadata), do: Logger.log(level, msg, metadata) + defp maybe_warn_index_ddl_transaction(%{concurrently: true} = index, migration) do + migration_config = migration.__migration__() + + if not migration_config[:disable_ddl_transaction] do + IO.warn """ + Migration #{inspect(migration)} has set index `#{index.name}` on table \ + `#{index.table}` to concurrently but did not disable ddl transaction. 
\ + Please set: + + use Ecto.Migration + @disable_ddl_transaction true + """, [] + end + end + defp maybe_warn_index_ddl_transaction(_index, _migration), do: :ok + + defp maybe_warn_index_migration_lock(%{concurrently: true} = index, repo, migration) do + migration_lock_disabled = migration.__migration__()[:disable_migration_lock] + lock_strategy = repo.config()[:migration_lock] + adapter = repo.__adapter__() + + case {migration_lock_disabled, adapter, lock_strategy} do + {false, Ecto.Adapters.Postgres, :pg_advisory_lock} -> + :ok + + {false, Ecto.Adapters.Postgres, _} -> + IO.warn """ + Migration #{inspect(migration)} has set index `#{index.name}` on table \ + `#{index.table}` to concurrently but did not disable migration lock. \ + Please set: + + use Ecto.Migration + @disable_migration_lock true + + Alternatively, consider using advisory locks during migrations in the \ + repo configuration: + + config #{inspect(repo)}, migration_lock: :pg_advisory_lock + """, [] + + {false, _adapter, _migration_lock} -> + IO.warn """ + Migration #{inspect(migration)} has set index `#{index.name}` on table \ + `#{index.table}` to concurrently but did not disable migration lock. \ + Please set: + + use Ecto.Migration + @disable_migration_lock true + """, [] + + _ -> + :ok + end + end + defp maybe_warn_index_migration_lock(_index, _repo, _migration), do: :ok + defp command(ddl) when is_binary(ddl) or is_list(ddl), do: "execute #{inspect ddl}" @@ -391,9 +421,9 @@ defmodule Ecto.Migration.Runner do do: "rename column #{current_column} to #{new_column} on table #{quote_name(table.prefix, table.name)}" defp command({:create, %Constraint{check: nil, exclude: nil}}), - do: raise ArgumentError, "a constraint must have either a check or exclude option" + do: raise(ArgumentError, "a constraint must have either a check or exclude option") defp command({:create, %Constraint{check: check, exclude: exclude}}) when is_binary(check) and is_binary(exclude), - do: raise ArgumentError, "a constraint must not have both check and exclude options" + do: raise(ArgumentError, "a constraint must not have both check and exclude options") defp command({:create, %Constraint{check: check} = constraint}) when is_binary(check), do: "create check constraint #{constraint.name} on table #{quote_name(constraint.prefix, constraint.table)}" defp command({:create, %Constraint{exclude: exclude} = constraint}) when is_binary(exclude), diff --git a/.deps/ecto_sql/lib/ecto/migration/schema_migration.ex b/.deps/ecto_sql/lib/ecto/migration/schema_migration.ex @@ -14,7 +14,12 @@ defmodule Ecto.Migration.SchemaMigration do # The migration flag is used to signal to the repository # we are in a migration operation. 
- @default_opts [timeout: :infinity, log: false, schema_migration: true] + @default_opts [ + timeout: :infinity, + log: false, + schema_migration: true, + telemetry_options: [schema_migration: true] + ] def ensure_schema_migrations_table!(repo, config, opts) do {repo, source} = get_repo_and_source(repo, config) diff --git a/.deps/ecto_sql/lib/ecto/migrator.ex b/.deps/ecto_sql/lib/ecto/migrator.ex @@ -216,14 +216,6 @@ defmodule Ecto.Migrator do """ @spec up(Ecto.Repo.t, integer, module, Keyword.t) :: :ok | :already_up def up(repo, version, module, opts \\ []) do - opts = - if log_sql = opts[:log_sql] do - IO.warn(":log_sql is deprecated, please use log_migrations_sql instead") - Keyword.put(opts, :log_migrations_sql, log_sql) - else - opts - end - conditional_lock_for_migrations module, version, repo, opts, fn config, versions -> if version in versions do :already_up @@ -286,14 +278,6 @@ defmodule Ecto.Migrator do """ @spec down(Ecto.Repo.t, integer, module) :: :ok | :already_down def down(repo, version, module, opts \\ []) do - opts = - if log_sql = opts[:log_sql] do - IO.warn(":log_sql is deprecated, please use log_migrations_sql instead") - Keyword.put(opts, :log_migrations_sql, log_sql) - else - opts - end - conditional_lock_for_migrations module, version, repo, opts, fn config, versions -> if version in versions do do_down(repo, config, version, module, opts) @@ -408,14 +392,6 @@ defmodule Ecto.Migrator do """ @spec run(Ecto.Repo.t, String.t | [String.t] | [{integer, module}], atom, Keyword.t) :: [integer] def run(repo, migration_source, direction, opts) do - opts = - if log_sql = opts[:log_sql] do - IO.warn(":log_sql is deprecated, please use log_migrations_sql instead") - Keyword.put(opts, :log_migrations_sql, log_sql) - else - opts - end - migration_source = List.wrap(migration_source) pending = diff --git a/.deps/ecto_sql/mix.exs b/.deps/ecto_sql/mix.exs @@ -2,7 +2,7 @@ defmodule EctoSQL.MixProject do use Mix.Project @source_url "https://github.com/elixir-ecto/ecto_sql" - @version "3.8.3" + @version "3.9.0" @adapters ~w(pg myxql tds) def project do @@ -77,7 +77,7 @@ defmodule EctoSQL.MixProject do if path = System.get_env("ECTO_PATH") do {:ecto, path: path} else - {:ecto, "~> 3.8.4"} + {:ecto, "~> 3.9.0"} end end @@ -85,7 +85,7 @@ defmodule EctoSQL.MixProject do if path = System.get_env("POSTGREX_PATH") do {:postgrex, path: path} else - {:postgrex, "~> 0.15.0 or ~> 0.16.0 or ~> 1.0", optional: true} + {:postgrex, "~> 0.16.0 or ~> 1.0", optional: true} end end diff --git a/.deps/postgrex/.hex b/.deps/postgrex/.hex Binary files differ. diff --git a/.deps/postgrex/CHANGELOG.md b/.deps/postgrex/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## v0.16.5 (2022-09-20) + +* Enhancements + * Allow the `:search_path` to be set for new connections + +## v0.16.4 (2022-07-29) + +* Enhancements + * Support Unix sockets in hostname and PGHOST + * Support infinity value on numerics/decimals (PG14+) + * Add count to Table.Reader metadata + * Fix warnings on Elixir v1.15 + ## v0.16.3 (2022-04-27) * Enhancements diff --git a/.deps/postgrex/hex_metadata.config b/.deps/postgrex/hex_metadata.config @@ -78,4 +78,4 @@ {<<"optional">>,false}, {<<"repository">>,<<"hexpm">>}, {<<"requirement">>,<<"~> 1.1">>}]]}. -{<<"version">>,<<"0.16.3">>}. +{<<"version">>,<<"0.16.5">>}. 
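For reference, the partial-unique-index workaround quoted in the Ecto.Migration docs above, written out as a complete migration. This is a minimal sketch: the module, table, and column names come from the doc example, not from this repository.

    defmodule MyApp.Repo.Migrations.AddProductSkuIndexes do
      use Ecto.Migration

      def change do
        # Enforces sku uniqueness per category; two rows with a NULL
        # category_id never conflict with each other here.
        create index("products", [:sku, :category_id], unique: true)

        # Closes the NULL gap: sku must be unique among rows where
        # category_id IS NULL.
        create index("products", [:sku], unique: true, where: "category_id IS NULL")

        # On PostgreSQL 15+, the `:nulls_distinct` option validated above
        # (unique indexes only) can presumably replace both indexes:
        #
        #     create index("products", [:sku, :category_id], unique: true, nulls_distinct: false)
      end
    end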
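A note on the reworked `__primary_key__/1` above: `table.primary_key` may now be `false` (no primary key), `true` (fall back to the repo's `:migration_primary_key` config), or a keyword list, where `:name` defaults to `:id` and `:type` to `:bigserial`; any other value raises `ArgumentError`. A hedged sketch of the keyword-list form (table and column names are illustrative):

    create table("events", primary_key: [name: :event_id, type: :binary_id]) do
      add :payload, :map
      timestamps()
    end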
diff --git a/.deps/postgrex/lib/postgrex.ex b/.deps/postgrex/lib/postgrex.ex @@ -53,6 +53,7 @@ defmodule Postgrex do | {:prepare, :named | :unnamed} | {:transactions, :strict | :naive} | {:types, module} + | {:search_path, [String.t()]} | {:disconnect_on_error_codes, [atom]} | DBConnection.start_option() @@ -158,6 +159,15 @@ defmodule Postgrex do option is only required when using custom encoding or decoding (default: `Postgrex.DefaultTypes`); + * `:search_path` - A list of strings used to set the search path for the connection. + This is useful when, for instance, an extension like `citext` is installed in a + separate schema. If that schema is not in the connection's search path, Postgrex + might not be able to recognize the extension's data type. When this option is `nil`, + the search path is not modified. (default: `nil`). + See the [PostgreSQL docs](https://www.postgresql.org/docs/current/ddl-schemas.html#DDL-SCHEMAS-PATH) + for more details. + + `Postgrex` uses the `DBConnection` library and supports all `DBConnection` options like `:idle`, `:after_connect` etc. See `DBConnection.start_link/2` for more information. @@ -179,20 +189,38 @@ defmodule Postgrex do ## SSL client authentication - When connecting to CockroachDB instances running in secure mode it is idiomatic to use - client SSL certificate authentication. + When connecting to Postgres or CockroachDB instances over SSL it is idiomatic to use + certificate authentication. Config files do not allowing passing functions, + so use the `init` callback of the Ecto supervisor. - An example of Repository configuration: + In your Repository configuration: config :app, App.Repo, ssl: String.to_existing_atom(System.get_env("DB_SSL_ENABLED", "true")), - ssl_opts: [ + verify_ssl: true + + And in App.Repo, set your `:ssl_opts`: + + def init(_type, config) do + config = + if config[:verify_ssl] do + Keyword.put(config, :ssl_opts, my_ssl_opts(config[:hostname])) + else + config + end + + {:ok, config} + end + + def my_ssl_opts(server) do + [ verify: :verify_peer, - server_name_indication: System.get_env("DB_HOSTNAME") cacertfile: System.get_env("DB_CA_CERT_FILE"), + server_name_indication: String.to_charlist(server), customize_hostname_check: [match_fun: :public_key.pkix_verify_hostname_match_fun(:https)], depth: 3 ] + end ## PgBouncer diff --git a/.deps/postgrex/lib/postgrex/extensions/array.ex b/.deps/postgrex/lib/postgrex/extensions/array.ex @@ -28,8 +28,8 @@ defmodule Postgrex.Extensions.Array do def decode(_) do quote location: :keep do - <<len::int32, binary::binary-size(len)>>, [oid], [type] -> - <<ndims::int32, _has_null::int32, ^oid::uint32, dims::size(ndims)-binary-unit(64), + <<len::int32(), binary::binary-size(len)>>, [oid], [type] -> + <<ndims::int32(), _has_null::int32(), ^oid::uint32(), dims::size(ndims)-binary-unit(64), data::binary>> = binary # decode_list/2 defined by TypeModule @@ -45,14 +45,14 @@ defmodule Postgrex.Extensions.Array do # While libpq will decode an payload encoded for a 0-dim array, CockroachDB will not. # Also, this is how libpq actually encodes 0-dim arrays. 
def encode([], elem_oid, _encoder) do - <<20::int32, 1::int32, 0::int32, elem_oid::uint32, 0::int32, 1::int32>> + <<20::int32(), 1::int32(), 0::int32(), elem_oid::uint32(), 0::int32(), 1::int32()>> end def encode(list, elem_oid, encoder) do {data, ndims, lengths} = encode(list, 0, [], encoder) - lengths = for len <- Enum.reverse(lengths), do: <<len::int32, 1::int32>> - iodata = [<<ndims::int32, 0::int32, elem_oid::uint32>>, lengths, data] - [<<IO.iodata_length(iodata)::int32>> | iodata] + lengths = for len <- Enum.reverse(lengths), do: <<len::int32(), 1::int32()>> + iodata = [<<ndims::int32(), 0::int32(), elem_oid::uint32()>>, lengths, data] + [<<IO.iodata_length(iodata)::int32()>> | iodata] end defp encode([], ndims, lengths, _encoder) do @@ -96,7 +96,7 @@ defmodule Postgrex.Extensions.Array do end end - defp decode_dims(<<len::int32, _lbound::int32, rest::binary>>, acc) do + defp decode_dims(<<len::int32(), _lbound::int32(), rest::binary>>, acc) do decode_dims(rest, [len | acc]) end diff --git a/.deps/postgrex/lib/postgrex/extensions/bit_string.ex b/.deps/postgrex/lib/postgrex/extensions/bit_string.ex @@ -8,7 +8,7 @@ defmodule Postgrex.Extensions.BitString do def encode(_) do quote location: :keep, generated: true do val when is_binary(val) -> - [<<byte_size(val) + 4::int32, bit_size(val)::uint32>> | val] + [<<byte_size(val) + 4::int32(), bit_size(val)::uint32()>> | val] val when is_bitstring(val) -> bin_size = byte_size(val) @@ -18,7 +18,7 @@ defmodule Postgrex.Extensions.BitString do bit_count = bit_size(val) [ - <<bin_size + 4::int32, bit_count::uint32>>, + <<bin_size + 4::int32(), bit_count::uint32()>>, binary | <<last::bits, 0::size(pad)>> ] @@ -30,7 +30,7 @@ defmodule Postgrex.Extensions.BitString do def decode(:copy) do quote location: :keep do - <<len::int32, value::binary-size(len)>> -> + <<len::int32(), value::binary-size(len)>> -> copy = :binary.copy(value) <<len::unsigned-32, bits::bits-size(len), _::bits>> = copy bits @@ -39,8 +39,8 @@ defmodule Postgrex.Extensions.BitString do def decode(:reference) do quote location: :keep do - <<len::int32, value::binary-size(len)>> -> - <<len::int32, bits::bits-size(len), _::bits>> = value + <<len::int32(), value::binary-size(len)>> -> + <<len::int32(), bits::bits-size(len), _::bits>> = value bits end end diff --git a/.deps/postgrex/lib/postgrex/extensions/bool.ex b/.deps/postgrex/lib/postgrex/extensions/bool.ex @@ -6,10 +6,10 @@ defmodule Postgrex.Extensions.Bool do def encode(_) do quote location: :keep do true -> - <<1::int32, 1>> + <<1::int32(), 1>> false -> - <<1::int32, 0>> + <<1::int32(), 0>> other -> raise DBConnection.EncodeError, Postgrex.Utils.encode_msg(other, "a boolean") @@ -18,8 +18,8 @@ defmodule Postgrex.Extensions.Bool do def decode(_) do quote location: :keep do - <<1::int32, 1>> -> true - <<1::int32, 0>> -> false + <<1::int32(), 1>> -> true + <<1::int32(), 0>> -> false end end end diff --git a/.deps/postgrex/lib/postgrex/extensions/box.ex b/.deps/postgrex/lib/postgrex/extensions/box.ex @@ -10,7 +10,7 @@ defmodule Postgrex.Extensions.Box do encoded_p1 = Point.encode_point(p1, Postgrex.Box) encoded_p2 = Point.encode_point(p2, Postgrex.Box) # 2 points -> 16 bytes each - [<<32::int32>>, encoded_p1 | encoded_p2] + [<<32::int32()>>, encoded_p1 | encoded_p2] other -> raise DBConnection.EncodeError, Postgrex.Utils.encode_msg(other, Postgrex.Line) @@ -20,7 +20,7 @@ defmodule Postgrex.Extensions.Box do def decode(_) do quote location: :keep do # 2 points -> 16 bytes each - <<32::int32, x1::float64, y1::float64, x2::float64, 
y2::float64>> -> + <<32::int32(), x1::float64(), y1::float64(), x2::float64(), y2::float64()>> -> p1 = %Postgrex.Point{x: x1, y: y1} p2 = %Postgrex.Point{x: x2, y: y2} %Postgrex.Box{upper_right: p1, bottom_left: p2} diff --git a/.deps/postgrex/lib/postgrex/extensions/circle.ex b/.deps/postgrex/lib/postgrex/extensions/circle.ex @@ -7,7 +7,7 @@ defmodule Postgrex.Extensions.Circle do quote location: :keep do %Postgrex.Circle{center: %Postgrex.Point{x: x, y: y}, radius: r} when is_number(x) and is_number(y) and is_number(r) and r >= 0 -> - <<24::int32, x::float64, y::float64, r::float64>> + <<24::int32(), x::float64(), y::float64(), r::float64()>> other -> raise DBConnection.EncodeError, Postgrex.Utils.encode_msg(other, Postgrex.Path) @@ -16,7 +16,7 @@ defmodule Postgrex.Extensions.Circle do def decode(_) do quote location: :keep do - <<24::int32, x::float64, y::float64, r::float64>> -> + <<24::int32(), x::float64(), y::float64(), r::float64()>> -> %Postgrex.Circle{center: %Postgrex.Point{x: x, y: y}, radius: r} end end diff --git a/.deps/postgrex/lib/postgrex/extensions/date.ex b/.deps/postgrex/lib/postgrex/extensions/date.ex @@ -26,7 +26,7 @@ defmodule Postgrex.Extensions.Date do def decode(_) do quote location: :keep do - <<4::int32, days::int32>> -> + <<4::int32(), days::int32()>> -> unquote(__MODULE__).day_to_elixir(days) end end @@ -34,7 +34,7 @@ defmodule Postgrex.Extensions.Date do ## Helpers def encode_elixir(%Date{year: year} = date) when year <= @max_year do - <<4::int32, Date.to_gregorian_days(date) - @gd_epoch::int32>> + <<4::int32(), Date.to_gregorian_days(date) - @gd_epoch::int32()>> end def encode_elixir(%Date{} = date) do diff --git a/.deps/postgrex/lib/postgrex/extensions/float4.ex b/.deps/postgrex/lib/postgrex/extensions/float4.ex @@ -6,16 +6,16 @@ defmodule Postgrex.Extensions.Float4 do def encode(_) do quote location: :keep do n when is_number(n) -> - <<4::int32, n::float32>> + <<4::int32(), n::float32()>> :NaN -> - <<4::int32, 0::1, 255, 1::1, 0::22>> + <<4::int32(), 0::1, 255, 1::1, 0::22>> :inf -> - <<4::int32, 0::1, 255, 0::23>> + <<4::int32(), 0::1, 255, 0::23>> :"-inf" -> - <<4::int32, 1::1, 255, 0::23>> + <<4::int32(), 1::1, 255, 0::23>> other -> raise DBConnection.EncodeError, Postgrex.Utils.encode_msg(other, "a float") @@ -24,10 +24,10 @@ defmodule Postgrex.Extensions.Float4 do def decode(_) do quote location: :keep do - <<4::int32, 0::1, 255, 0::23>> -> :inf - <<4::int32, 1::1, 255, 0::23>> -> :"-inf" - <<4::int32, _::1, 255, _::23>> -> :NaN - <<4::int32, float::float32>> -> float + <<4::int32(), 0::1, 255, 0::23>> -> :inf + <<4::int32(), 1::1, 255, 0::23>> -> :"-inf" + <<4::int32(), _::1, 255, _::23>> -> :NaN + <<4::int32(), float::float32()>> -> float end end end diff --git a/.deps/postgrex/lib/postgrex/extensions/float8.ex b/.deps/postgrex/lib/postgrex/extensions/float8.ex @@ -6,16 +6,16 @@ defmodule Postgrex.Extensions.Float8 do def encode(_) do quote location: :keep do n when is_number(n) -> - <<8::int32, n::float64>> + <<8::int32(), n::float64()>> :NaN -> - <<8::int32, 0::1, 2047::11, 1::1, 0::51>> + <<8::int32(), 0::1, 2047::11, 1::1, 0::51>> :inf -> - <<8::int32, 0::1, 2047::11, 0::52>> + <<8::int32(), 0::1, 2047::11, 0::52>> :"-inf" -> - <<8::int32, 1::1, 2047::11, 0::52>> + <<8::int32(), 1::1, 2047::11, 0::52>> other -> raise DBConnection.EncodeError, Postgrex.Utils.encode_msg(other, "a float") @@ -24,10 +24,10 @@ defmodule Postgrex.Extensions.Float8 do def decode(_) do quote location: :keep do - <<8::int32, 0::1, 2047::11, 0::52>> -> :inf - 
<<8::int32, 1::1, 2047::11, 0::52>> -> :"-inf" - <<8::int32, _::1, 2047::11, _::52>> -> :NaN - <<8::int32, float::float64>> -> float + <<8::int32(), 0::1, 2047::11, 0::52>> -> :inf + <<8::int32(), 1::1, 2047::11, 0::52>> -> :"-inf" + <<8::int32(), _::1, 2047::11, _::52>> -> :NaN + <<8::int32(), float::float64()>> -> float end end end diff --git a/.deps/postgrex/lib/postgrex/extensions/hstore.ex b/.deps/postgrex/lib/postgrex/extensions/hstore.ex @@ -9,7 +9,7 @@ defmodule Postgrex.Extensions.HStore do quote location: :keep do %{} = map -> data = unquote(__MODULE__).encode_hstore(map) - [<<IO.iodata_length(data)::int32>> | data] + [<<IO.iodata_length(data)::int32()>> | data] other -> raise DBConnection.EncodeError, Postgrex.Utils.encode_msg(other, "a map") @@ -18,7 +18,7 @@ defmodule Postgrex.Extensions.HStore do def decode(mode) do quote do - <<len::int32, data::binary-size(len)>> -> + <<len::int32(), data::binary-size(len)>> -> unquote(__MODULE__).decode_hstore(data, unquote(mode)) end end @@ -31,7 +31,7 @@ defmodule Postgrex.Extensions.HStore do [acc, encode_hstore_key(key), encode_hstore_value(value)] end) - [<<map_size(hstore_map)::int32>> | keys_and_values] + [<<map_size(hstore_map)::int32()>> | keys_and_values] end defp encode_hstore_key(key) when is_binary(key) do @@ -43,19 +43,19 @@ defmodule Postgrex.Extensions.HStore do end defp encode_hstore_value(nil) do - <<-1::int32>> + <<-1::int32()>> end defp encode_hstore_value(value) when is_binary(value) do value_byte_size = byte_size(value) - <<value_byte_size::int32>> <> value + <<value_byte_size::int32()>> <> value end - def decode_hstore(<<_length::int32, pairs::binary>>, :reference) do + def decode_hstore(<<_length::int32(), pairs::binary>>, :reference) do decode_hstore_ref(pairs, %{}) end - def decode_hstore(<<_length::int32, pairs::binary>>, :copy) do + def decode_hstore(<<_length::int32(), pairs::binary>>, :copy) do decode_hstore_copy(pairs, %{}) end @@ -65,14 +65,14 @@ defmodule Postgrex.Extensions.HStore do # in the case of a NULL value, there won't be a length defp decode_hstore_ref( - <<key_length::int32, key::binary(key_length), -1::int32, rest::binary>>, + <<key_length::int32(), key::binary(key_length), -1::int32(), rest::binary>>, acc ) do decode_hstore_ref(rest, Map.put(acc, key, nil)) end defp decode_hstore_ref( - <<key_length::int32, key::binary(key_length), value_length::int32, + <<key_length::int32(), key::binary(key_length), value_length::int32(), value::binary(value_length), rest::binary>>, acc ) do @@ -85,14 +85,14 @@ defmodule Postgrex.Extensions.HStore do # in the case of a NULL value, there won't be a length defp decode_hstore_copy( - <<key_length::int32, key::binary(key_length), -1::int32, rest::binary>>, + <<key_length::int32(), key::binary(key_length), -1::int32(), rest::binary>>, acc ) do decode_hstore_copy(rest, Map.put(acc, :binary.copy(key), nil)) end defp decode_hstore_copy( - <<key_length::int32, key::binary(key_length), value_length::int32, + <<key_length::int32(), key::binary(key_length), value_length::int32(), value::binary(value_length), rest::binary>>, acc ) do diff --git a/.deps/postgrex/lib/postgrex/extensions/inet.ex b/.deps/postgrex/lib/postgrex/extensions/inet.ex @@ -7,16 +7,16 @@ defmodule Postgrex.Extensions.INET do def encode(_) do quote location: :keep do %Postgrex.INET{address: {a, b, c, d}, netmask: nil} -> - <<8::int32, 2, 32, 0, 4, a, b, c, d>> + <<8::int32(), 2, 32, 0, 4, a, b, c, d>> %Postgrex.INET{address: {a, b, c, d}, netmask: n} -> - <<8::int32, 2, n, 1, 4, a, b, c, d>> + 
<<8::int32(), 2, n, 1, 4, a, b, c, d>> %Postgrex.INET{address: {a, b, c, d, e, f, g, h}, netmask: nil} -> - <<20::int32, 3, 128, 0, 16, a::16, b::16, c::16, d::16, e::16, f::16, g::16, h::16>> + <<20::int32(), 3, 128, 0, 16, a::16, b::16, c::16, d::16, e::16, f::16, g::16, h::16>> %Postgrex.INET{address: {a, b, c, d, e, f, g, h}, netmask: n} -> - <<20::int32, 3, n, 1, 16, a::16, b::16, c::16, d::16, e::16, f::16, g::16, h::16>> + <<20::int32(), 3, n, 1, 16, a::16, b::16, c::16, d::16, e::16, f::16, g::16, h::16>> other -> raise DBConnection.EncodeError, Postgrex.Utils.encode_msg(other, Postgrex.INET) @@ -25,11 +25,11 @@ defmodule Postgrex.Extensions.INET do def decode(_) do quote location: :keep do - <<8::int32, 2, n, cidr?, 4, a, b, c, d>> -> + <<8::int32(), 2, n, cidr?, 4, a, b, c, d>> -> n = if(cidr? == 1 or n != 32, do: n, else: nil) %Postgrex.INET{address: {a, b, c, d}, netmask: n} - <<20::int32, 3, n, cidr?, 16, a::16, b::16, c::16, d::16, e::16, f::16, g::16, h::16>> -> + <<20::int32(), 3, n, cidr?, 16, a::16, b::16, c::16, d::16, e::16, f::16, g::16, h::16>> -> n = if(cidr? == 1 or n != 128, do: n, else: nil) %Postgrex.INET{address: {a, b, c, d, e, f, g, h}, netmask: n} end diff --git a/.deps/postgrex/lib/postgrex/extensions/int2.ex b/.deps/postgrex/lib/postgrex/extensions/int2.ex @@ -10,7 +10,7 @@ defmodule Postgrex.Extensions.Int2 do quote location: :keep do int when is_integer(int) and int in unquote(range) -> - <<2::int32, int::int16>> + <<2::int32(), int::int16()>> other -> raise DBConnection.EncodeError, Postgrex.Utils.encode_msg(other, unquote(range)) @@ -19,7 +19,7 @@ defmodule Postgrex.Extensions.Int2 do def decode(_) do quote location: :keep do - <<2::int32, int::int16>> -> int + <<2::int32(), int::int16()>> -> int end end end diff --git a/.deps/postgrex/lib/postgrex/extensions/int4.ex b/.deps/postgrex/lib/postgrex/extensions/int4.ex @@ -10,7 +10,7 @@ defmodule Postgrex.Extensions.Int4 do quote location: :keep do int when is_integer(int) and int in unquote(range) -> - <<4::int32, int::int32>> + <<4::int32(), int::int32()>> other -> raise DBConnection.EncodeError, Postgrex.Utils.encode_msg(other, unquote(range)) @@ -19,7 +19,7 @@ defmodule Postgrex.Extensions.Int4 do def decode(_) do quote location: :keep do - <<4::int32, int::int32>> -> int + <<4::int32(), int::int32()>> -> int end end end diff --git a/.deps/postgrex/lib/postgrex/extensions/int8.ex b/.deps/postgrex/lib/postgrex/extensions/int8.ex @@ -10,7 +10,7 @@ defmodule Postgrex.Extensions.Int8 do quote location: :keep do int when is_integer(int) and int in unquote(range) -> - <<8::int32, int::int64>> + <<8::int32(), int::int64()>> other -> raise DBConnection.EncodeError, Postgrex.Utils.encode_msg(other, unquote(range)) @@ -19,7 +19,7 @@ defmodule Postgrex.Extensions.Int8 do def decode(_) do quote location: :keep do - <<8::int32, int::int64>> -> int + <<8::int32(), int::int64()>> -> int end end end diff --git a/.deps/postgrex/lib/postgrex/extensions/interval.ex b/.deps/postgrex/lib/postgrex/extensions/interval.ex @@ -7,7 +7,7 @@ defmodule Postgrex.Extensions.Interval do quote location: :keep do %Postgrex.Interval{months: months, days: days, secs: secs, microsecs: microsecs} -> microsecs = secs * 1_000_000 + microsecs - <<16::int32, microsecs::int64, days::int32, months::int32>> + <<16::int32(), microsecs::int64(), days::int32(), months::int32()>> other -> raise DBConnection.EncodeError, Postgrex.Utils.encode_msg(other, Postgrex.Interval) @@ -16,7 +16,7 @@ defmodule Postgrex.Extensions.Interval do def decode(_) do 
quote location: :keep do - <<16::int32, microsecs::int64, days::int32, months::int32>> -> + <<16::int32(), microsecs::int64(), days::int32(), months::int32()>> -> secs = div(microsecs, 1_000_000) microsecs = rem(microsecs, 1_000_000) %Postgrex.Interval{months: months, days: days, secs: secs, microsecs: microsecs} diff --git a/.deps/postgrex/lib/postgrex/extensions/json.ex b/.deps/postgrex/lib/postgrex/extensions/json.ex @@ -33,13 +33,13 @@ defmodule Postgrex.Extensions.JSON do quote location: :keep do map -> data = unquote(library).encode_to_iodata!(map) - [<<IO.iodata_length(data)::int32>> | data] + [<<IO.iodata_length(data)::int32()>> | data] end end def decode({library, :copy}) do quote location: :keep do - <<len::int32, json::binary-size(len)>> -> + <<len::int32(), json::binary-size(len)>> -> json |> :binary.copy() |> unquote(library).decode!() @@ -48,7 +48,7 @@ defmodule Postgrex.Extensions.JSON do def decode({library, :reference}) do quote location: :keep do - <<len::int32, json::binary-size(len)>> -> + <<len::int32(), json::binary-size(len)>> -> unquote(library).decode!(json) end end diff --git a/.deps/postgrex/lib/postgrex/extensions/jsonb.ex b/.deps/postgrex/lib/postgrex/extensions/jsonb.ex @@ -24,13 +24,13 @@ defmodule Postgrex.Extensions.JSONB do quote location: :keep do map -> data = unquote(library).encode_to_iodata!(map) - [<<IO.iodata_length(data) + 1::int32, 1>> | data] + [<<IO.iodata_length(data) + 1::int32(), 1>> | data] end end def decode({library, :copy}) do quote location: :keep do - <<len::int32, data::binary-size(len)>> -> + <<len::int32(), data::binary-size(len)>> -> <<1, json::binary>> = data json @@ -41,7 +41,7 @@ defmodule Postgrex.Extensions.JSONB do def decode({library, :reference}) do quote location: :keep do - <<len::int32, data::binary-size(len)>> -> + <<len::int32(), data::binary-size(len)>> -> <<1, json::binary>> = data unquote(library).decode!(json) end diff --git a/.deps/postgrex/lib/postgrex/extensions/line.ex b/.deps/postgrex/lib/postgrex/extensions/line.ex @@ -7,7 +7,7 @@ defmodule Postgrex.Extensions.Line do quote location: :keep do %Postgrex.Line{a: a, b: b, c: c} when is_number(a) and is_number(b) and is_number(c) -> # a, b, c are 8 bytes each - <<24::int32, a::float64, b::float64, c::float64>> + <<24::int32(), a::float64(), b::float64(), c::float64()>> other -> raise DBConnection.EncodeError, Postgrex.Utils.encode_msg(other, Postgrex.Line) @@ -17,7 +17,7 @@ defmodule Postgrex.Extensions.Line do def decode(_) do quote location: :keep do # a, b, c are 8 bytes each - <<24::int32, a::float64, b::float64, c::float64>> -> + <<24::int32(), a::float64(), b::float64(), c::float64()>> -> %Postgrex.Line{a: a, b: b, c: c} end end diff --git a/.deps/postgrex/lib/postgrex/extensions/line_segment.ex b/.deps/postgrex/lib/postgrex/extensions/line_segment.ex @@ -10,7 +10,7 @@ defmodule Postgrex.Extensions.LineSegment do encoded_p1 = Point.encode_point(p1, Postgrex.LineSegment) encoded_p2 = Point.encode_point(p2, Postgrex.LineSegment) # 2 points -> 16 bytes each - [<<32::int32>>, encoded_p1 | encoded_p2] + [<<32::int32()>>, encoded_p1 | encoded_p2] other -> raise DBConnection.EncodeError, Postgrex.Utils.encode_msg(other, Postgrex.Line) @@ -20,7 +20,7 @@ defmodule Postgrex.Extensions.LineSegment do def decode(_) do quote location: :keep do # 2 points -> 16 bytes each - <<32::int32, x1::float64, y1::float64, x2::float64, y2::float64>> -> + <<32::int32(), x1::float64(), y1::float64(), x2::float64(), y2::float64()>> -> p1 = %Postgrex.Point{x: x1, y: y1} p2 = 
%Postgrex.Point{x: x2, y: y2} %Postgrex.LineSegment{point1: p1, point2: p2} diff --git a/.deps/postgrex/lib/postgrex/extensions/macaddr.ex b/.deps/postgrex/lib/postgrex/extensions/macaddr.ex @@ -6,7 +6,7 @@ defmodule Postgrex.Extensions.MACADDR do def encode(_) do quote location: :keep do %Postgrex.MACADDR{address: {a, b, c, d, e, f}} -> - <<6::int32, a, b, c, d, e, f>> + <<6::int32(), a, b, c, d, e, f>> other -> raise DBConnection.EncodeError, Postgrex.Utils.encode_msg(other, Postgrex.MACADDR) @@ -15,7 +15,7 @@ defmodule Postgrex.Extensions.MACADDR do def decode(_) do quote location: :keep do - <<6::int32, a::8, b::8, c::8, d::8, e::8, f::8>> -> + <<6::int32(), a::8, b::8, c::8, d::8, e::8, f::8>> -> %Postgrex.MACADDR{address: {a, b, c, d, e, f}} end end diff --git a/.deps/postgrex/lib/postgrex/extensions/name.ex b/.deps/postgrex/lib/postgrex/extensions/name.ex @@ -8,7 +8,7 @@ defmodule Postgrex.Extensions.Name do def encode(_) do quote location: :keep, generated: true do name when is_binary(name) and byte_size(name) < 64 -> - [<<byte_size(name)::int32>> | name] + [<<byte_size(name)::int32()>> | name] other -> msg = "a binary string of less than 64 bytes" @@ -18,13 +18,13 @@ defmodule Postgrex.Extensions.Name do def decode(:reference) do quote location: :keep do - <<len::int32, name::binary-size(len)>> -> name + <<len::int32(), name::binary-size(len)>> -> name end end def decode(:copy) do quote location: :keep do - <<len::int32, name::binary-size(len)>> -> :binary.copy(name) + <<len::int32(), name::binary-size(len)>> -> :binary.copy(name) end end end diff --git a/.deps/postgrex/lib/postgrex/extensions/numeric.ex b/.deps/postgrex/lib/postgrex/extensions/numeric.ex @@ -7,21 +7,21 @@ defmodule Postgrex.Extensions.Numeric do quote location: :keep, generated: true do %Decimal{} = decimal -> data = unquote(__MODULE__).encode_numeric(decimal) - [<<IO.iodata_length(data)::int32>> | data] + [<<IO.iodata_length(data)::int32()>> | data] n when is_float(n) -> data = unquote(__MODULE__).encode_numeric(Decimal.from_float(n)) - [<<IO.iodata_length(data)::int32>> | data] + [<<IO.iodata_length(data)::int32()>> | data] n when is_integer(n) -> data = unquote(__MODULE__).encode_numeric(Decimal.new(n)) - [<<IO.iodata_length(data)::int32>> | data] + [<<IO.iodata_length(data)::int32()>> | data] end end def decode(_) do quote location: :keep do - <<len::int32, data::binary-size(len)>> -> + <<len::int32(), data::binary-size(len)>> -> unquote(__MODULE__).decode_numeric(data) end end @@ -30,11 +30,15 @@ defmodule Postgrex.Extensions.Numeric do # TODO: remove qNaN and sNaN when we depend on Decimal 2.0 def encode_numeric(%Decimal{coef: coef}) when coef in [:NaN, :qNaN, :sNaN] do - <<0::int16, 0::int16, 0xC000::uint16, 0::int16>> + <<0::int16(), 0::int16(), 0xC000::uint16(), 0::int16()>> end - def encode_numeric(%Decimal{coef: :inf} = decimal) do - raise ArgumentError, "cannot represent #{inspect(decimal)} as numeric type" + def encode_numeric(%Decimal{sign: 1, coef: :inf}) do + <<0::int16(), 0::int16(), 0xD000::uint16(), 0::int16()>> + end + + def encode_numeric(%Decimal{sign: -1, coef: :inf}) do + <<0::int16(), 0::int16(), 0xF000::uint16(), 0::int16()>> end def encode_numeric(%Decimal{sign: sign, coef: coef, exp: exp}) do @@ -49,8 +53,8 @@ defmodule Postgrex.Extensions.Numeric do num_digits = length(digits) weight = max(length(integer_digits) - 1, 0) - bin = for digit <- digits, into: "", do: <<digit::uint16>> - [<<num_digits::int16, weight::int16, sign::uint16, scale::int16>> | bin] + bin = for digit <- digits, 
into: "", do: <<digit::uint16()>> + [<<num_digits::int16(), weight::int16(), sign::uint16(), scale::int16()>> | bin] end defp encode_sign(1), do: 0x0000 @@ -87,16 +91,28 @@ defmodule Postgrex.Extensions.Numeric do encode_digits(coef, [digit | digits]) end - def decode_numeric(<<ndigits::int16, weight::int16, sign::uint16, scale::int16, tail::binary>>) do + def decode_numeric( + <<ndigits::int16(), weight::int16(), sign::uint16(), scale::int16(), tail::binary>> + ) do decode_numeric(ndigits, weight, sign, scale, tail) end @nan Decimal.new("NaN") + @positive_inf Decimal.new("Inf") + @negative_inf Decimal.new("-Inf") defp decode_numeric(0, _weight, 0xC000, _scale, "") do @nan end + defp decode_numeric(0, _weight, 0xD000, _scale, "") do + @positive_inf + end + + defp decode_numeric(0, _weight, 0xF000, _scale, "") do + @negative_inf + end + defp decode_numeric(_num_digits, weight, sign, scale, bin) do {value, weight} = decode_numeric_int(bin, weight, 0) sign = decode_sign(sign) @@ -120,7 +136,7 @@ defmodule Postgrex.Extensions.Numeric do defp decode_numeric_int("", weight, acc), do: {acc, weight} - defp decode_numeric_int(<<digit::int16, tail::binary>>, weight, acc) do + defp decode_numeric_int(<<digit::int16(), tail::binary>>, weight, acc) do acc = acc * 10_000 + digit decode_numeric_int(tail, weight - 1, acc) end diff --git a/.deps/postgrex/lib/postgrex/extensions/oid.ex b/.deps/postgrex/lib/postgrex/extensions/oid.ex @@ -13,7 +13,7 @@ defmodule Postgrex.Extensions.OID do quote location: :keep do oid when is_integer(oid) and oid in unquote(range) -> - <<4::int32, oid::uint32>> + <<4::int32(), oid::uint32()>> binary when is_binary(binary) -> msg = @@ -30,7 +30,7 @@ defmodule Postgrex.Extensions.OID do def decode(_) do quote location: :keep do - <<4::int32, oid::uint32>> -> oid + <<4::int32(), oid::uint32()>> -> oid end end end diff --git a/.deps/postgrex/lib/postgrex/extensions/path.ex b/.deps/postgrex/lib/postgrex/extensions/path.ex @@ -16,7 +16,7 @@ defmodule Postgrex.Extensions.Path do # 1 byte for open/closed flag, 4 for length, 16 for each point nbytes = 5 + 16 * len - [<<nbytes::int32>>, open_byte, <<len::int32>> | encoded_points] + [<<nbytes::int32()>>, open_byte, <<len::int32()>> | encoded_points] other -> raise DBConnection.EncodeError, Postgrex.Utils.encode_msg(other, Postgrex.Path) @@ -25,12 +25,12 @@ defmodule Postgrex.Extensions.Path do def decode(_) do quote location: :keep do - <<nbytes::int32, path_data::binary-size(nbytes)>> -> + <<nbytes::int32(), path_data::binary-size(nbytes)>> -> Path.decode_path(path_data) end end - def decode_path(<<o::int8, n::int32, point_data::binary-size(n)-unit(128)>>) do + def decode_path(<<o::int8(), n::int32(), point_data::binary-size(n)-unit(128)>>) do open = o == 0 points = decode_points(point_data, []) %Postgrex.Path{open: open, points: points} @@ -41,7 +41,7 @@ defmodule Postgrex.Extensions.Path do defp decode_points(<<>>, points), do: Enum.reverse(points) - defp decode_points(<<x::float64, y::float64, rest::bits>>, points) do + defp decode_points(<<x::float64(), y::float64(), rest::bits>>, points) do decode_points(rest, [%Postgrex.Point{x: x, y: y} | points]) end end diff --git a/.deps/postgrex/lib/postgrex/extensions/point.ex b/.deps/postgrex/lib/postgrex/extensions/point.ex @@ -6,7 +6,7 @@ defmodule Postgrex.Extensions.Point do def encode(_) do quote location: :keep do %Postgrex.Point{x: x, y: y} -> - <<16::int32, x::float64, y::float64>> + <<16::int32(), x::float64(), y::float64()>> other -> raise DBConnection.EncodeError, 
Postgrex.Utils.encode_msg(other, Postgrex.Point) @@ -15,13 +15,13 @@ defmodule Postgrex.Extensions.Point do def decode(_) do quote location: :keep do - <<16::int32, x::float64, y::float64>> -> %Postgrex.Point{x: x, y: y} + <<16::int32(), x::float64(), y::float64()>> -> %Postgrex.Point{x: x, y: y} end end # used by other extensions def encode_point(%Postgrex.Point{x: x, y: y}, _) do - <<x::float64, y::float64>> + <<x::float64(), y::float64()>> end def encode_point(other, wanted) do diff --git a/.deps/postgrex/lib/postgrex/extensions/polygon.ex b/.deps/postgrex/lib/postgrex/extensions/polygon.ex @@ -18,7 +18,7 @@ defmodule Postgrex.Extensions.Polygon do # 32 bits for len, 64 for each x and each y nbytes = 4 + 16 * len - [<<nbytes::int32>>, <<len::int32>> | vert] + [<<nbytes::int32()>>, <<len::int32()>> | vert] other -> raise DBConnection.EncodeError, Postgrex.Utils.encode_msg(other, Postgrex.Polygon) @@ -27,20 +27,20 @@ defmodule Postgrex.Extensions.Polygon do def decode(_) do quote location: :keep do - <<nbytes::int32, polygon_data::binary-size(nbytes)>> -> + <<nbytes::int32(), polygon_data::binary-size(nbytes)>> -> vertices = Polygon.decode_vertices(polygon_data) %Postgrex.Polygon{vertices: vertices} end end # n vertices, 128 bits for each vertex - 64 for x, 64 for y - def decode_vertices(<<n::int32, vert_data::binary-size(n)-unit(128)>>) do + def decode_vertices(<<n::int32(), vert_data::binary-size(n)-unit(128)>>) do decode_vertices(vert_data, []) end defp decode_vertices(<<>>, v), do: Enum.reverse(v) - defp decode_vertices(<<x::float64, y::float64, rest::bits>>, v) do + defp decode_vertices(<<x::float64(), y::float64(), rest::bits>>, v) do decode_vertices(rest, [%Postgrex.Point{x: x, y: y} | v]) end end diff --git a/.deps/postgrex/lib/postgrex/extensions/range.ex b/.deps/postgrex/lib/postgrex/extensions/range.ex @@ -35,7 +35,7 @@ defmodule Postgrex.Extensions.Range do def decode(_) do quote location: :keep do - <<len::int32, binary::binary-size(len)>>, [oid], [type] -> + <<len::int32(), binary::binary-size(len)>>, [oid], [type] -> <<flags, data::binary>> = binary # decode_list/2 defined by TypeModule case decode_list(data, type) do @@ -51,7 +51,7 @@ defmodule Postgrex.Extensions.Range do ## Helpers def encode(_range, _oid, :empty, :empty) do - [<<1::int32, @range_empty>>] + [<<1::int32(), @range_empty>>] end def encode(%{lower_inclusive: lower_inc, upper_inclusive: upper_inc}, _oid, lower, upper) do @@ -83,7 +83,7 @@ defmodule Postgrex.Extensions.Range do false -> flags end - [<<IO.iodata_length(data) + 1::int32>>, flags | data] + [<<IO.iodata_length(data) + 1::int32()>>, flags | data] end def decode(flags, _oid, []) when (flags &&& @range_empty) != 0 do diff --git a/.deps/postgrex/lib/postgrex/extensions/raw.ex b/.deps/postgrex/lib/postgrex/extensions/raw.ex @@ -17,7 +17,7 @@ defmodule Postgrex.Extensions.Raw do def encode(_) do quote location: :keep, generated: true do bin when is_binary(bin) -> - [<<byte_size(bin)::int32>> | bin] + [<<byte_size(bin)::int32()>> | bin] other -> raise DBConnection.EncodeError, Postgrex.Utils.encode_msg(other, "a binary") @@ -26,13 +26,13 @@ defmodule Postgrex.Extensions.Raw do def decode(:copy) do quote location: :keep do - <<len::int32, value::binary-size(len)>> -> :binary.copy(value) + <<len::int32(), value::binary-size(len)>> -> :binary.copy(value) end end def decode(:reference) do quote location: :keep do - <<len::int32, value::binary-size(len)>> -> value + <<len::int32(), value::binary-size(len)>> -> value end end end diff --git 
a/.deps/postgrex/lib/postgrex/extensions/record.ex b/.deps/postgrex/lib/postgrex/extensions/record.ex @@ -22,7 +22,7 @@ defmodule Postgrex.Extensions.Record do tuple, oids, types when is_tuple(tuple) -> # encode_tuple/3 defined by TypeModule data = encode_tuple(tuple, oids, types) - [<<IO.iodata_length(data) + 4::int32, tuple_size(tuple)::int32>> | data] + [<<IO.iodata_length(data) + 4::int32(), tuple_size(tuple)::int32()>> | data] other, _, _ -> raise DBConnection.EncodeError, Postgrex.Utils.encode_msg(other, "a tuple") @@ -31,13 +31,13 @@ defmodule Postgrex.Extensions.Record do def decode(_) do quote location: :keep do - <<len::int32, binary::binary-size(len)>>, nil, types -> - <<count::int32, data::binary>> = binary + <<len::int32(), binary::binary-size(len)>>, nil, types -> + <<count::int32(), data::binary>> = binary # decode_tuple/3 defined by TypeModule decode_tuple(data, count, types) - <<len::int32, binary::binary-size(len)>>, oids, types -> - <<_::int32, data::binary>> = binary + <<len::int32(), binary::binary-size(len)>>, oids, types -> + <<_::int32(), data::binary>> = binary # decode_tuple/3 defined by TypeModule decode_tuple(data, oids, types) end diff --git a/.deps/postgrex/lib/postgrex/extensions/tid.ex b/.deps/postgrex/lib/postgrex/extensions/tid.ex @@ -6,7 +6,7 @@ defmodule Postgrex.Extensions.TID do def encode(_) do quote location: :keep do {block, tuple} -> - <<6::int32, block::uint32, tuple::uint16>> + <<6::int32(), block::uint32(), tuple::uint16()>> other -> raise DBConnection.EncodeError, Postgrex.Utils.encode_msg(other, "a tuple of 2 integers") @@ -15,7 +15,7 @@ defmodule Postgrex.Extensions.TID do def decode(_) do quote location: :keep do - <<6::int32, block::uint32, tuple::uint16>> -> + <<6::int32(), block::uint32(), tuple::uint16()>> -> {block, tuple} end end diff --git a/.deps/postgrex/lib/postgrex/extensions/time.ex b/.deps/postgrex/lib/postgrex/extensions/time.ex @@ -15,7 +15,7 @@ defmodule Postgrex.Extensions.Time do def decode(_) do quote location: :keep do - <<8::int32, microsecs::int64>> -> + <<8::int32(), microsecs::int64()>> -> unquote(__MODULE__).microsecond_to_elixir(microsecs) end end @@ -25,7 +25,7 @@ defmodule Postgrex.Extensions.Time do def encode_elixir(%Time{hour: hour, minute: min, second: sec, microsecond: {usec, _}}) when hour in 0..23 and min in 0..59 and sec in 0..59 and usec in 0..999_999 do time = {hour, min, sec} - <<8::int32, :calendar.time_to_seconds(time) * 1_000_000 + usec::int64>> + <<8::int32(), :calendar.time_to_seconds(time) * 1_000_000 + usec::int64()>> end def microsecond_to_elixir(microsec) do diff --git a/.deps/postgrex/lib/postgrex/extensions/timestamp.ex b/.deps/postgrex/lib/postgrex/extensions/timestamp.ex @@ -27,7 +27,7 @@ defmodule Postgrex.Extensions.Timestamp do def decode(infinity?) do quote location: :keep do - <<8::int32, microsecs::int64>> -> + <<8::int32(), microsecs::int64()>> -> unquote(__MODULE__).microsecond_to_elixir(microsecs, unquote(infinity?)) end end @@ -48,7 +48,7 @@ defmodule Postgrex.Extensions.Timestamp do usec in 0..999_999 do {gregorian_seconds, usec} = NaiveDateTime.to_gregorian_seconds(date_time) secs = gregorian_seconds - @gs_epoch - <<8::int32, secs * 1_000_000 + usec::int64>> + <<8::int32(), secs * 1_000_000 + usec::int64()>> end def microsecond_to_elixir(@plus_infinity, infinity?) do diff --git a/.deps/postgrex/lib/postgrex/extensions/timestamptz.ex b/.deps/postgrex/lib/postgrex/extensions/timestamptz.ex @@ -32,7 +32,7 @@ defmodule Postgrex.Extensions.TimestampTZ do def decode(infinity?) 
do quote location: :keep do - <<8::int32, microsecs::int64>> -> + <<8::int32(), microsecs::int64()>> -> unquote(__MODULE__).microsecond_to_elixir(microsecs, unquote(infinity?)) end end @@ -42,7 +42,7 @@ defmodule Postgrex.Extensions.TimestampTZ do def encode_elixir(%DateTime{utc_offset: 0, std_offset: 0} = datetime) do case DateTime.to_unix(datetime, :microsecond) do microsecs when microsecs in @us_min..@us_max -> - <<8::int32, microsecs - @us_epoch::int64>> + <<8::int32(), microsecs - @us_epoch::int64()>> _ -> raise ArgumentError, "#{inspect(datetime)} is not in the year range -4713..9999" diff --git a/.deps/postgrex/lib/postgrex/extensions/timetz.ex b/.deps/postgrex/lib/postgrex/extensions/timetz.ex @@ -17,7 +17,7 @@ defmodule Postgrex.Extensions.TimeTZ do def decode(_) do quote location: :keep do - <<12::int32, microsecs::int64, tz::int32>> -> + <<12::int32(), microsecs::int64(), tz::int32()>> -> unquote(__MODULE__).microsecond_to_elixir(microsecs, tz) end end @@ -40,7 +40,7 @@ defmodule Postgrex.Extensions.TimeTZ do def encode_elixir(%Time{hour: hour, minute: min, second: sec, microsecond: {usec, _}}) when hour in 0..23 and min in 0..59 and sec in 0..59 and usec in 0..999_999 do time = {hour, min, sec} - <<12::int32, :calendar.time_to_seconds(time) * 1_000_000 + usec::int64, 0::int32>> + <<12::int32(), :calendar.time_to_seconds(time) * 1_000_000 + usec::int64(), 0::int32()>> end def microsecond_to_elixir(microsec, tz) do diff --git a/.deps/postgrex/lib/postgrex/extensions/tsvector.ex b/.deps/postgrex/lib/postgrex/extensions/tsvector.ex @@ -8,7 +8,7 @@ defmodule Postgrex.Extensions.TSVector do quote location: :keep do values when is_list(values) -> encoded_tsvectors = unquote(__MODULE__).encode_tsvector(values) - <<byte_size(encoded_tsvectors)::int32, encoded_tsvectors::binary>> + <<byte_size(encoded_tsvectors)::int32(), encoded_tsvectors::binary>> other -> raise DBConnection.EncodeError, Postgrex.Utils.encode_msg(other, "a list of tsvectors") @@ -17,8 +17,8 @@ defmodule Postgrex.Extensions.TSVector do def decode(_) do quote do - <<len::int32, value::binary-size(len)>> -> - <<nb_lexemes::int32, words::binary>> = value + <<len::int32(), value::binary-size(len)>> -> + <<nb_lexemes::int32(), words::binary>> = value unquote(__MODULE__).decode_tsvector_values(words) end end @@ -26,7 +26,7 @@ defmodule Postgrex.Extensions.TSVector do ## Helpers def encode_tsvector(values) do - <<length(values)::int32, encode_lexemes(values)::binary>> + <<length(values)::int32(), encode_lexemes(values)::binary>> end defp encode_lexemes(values) do diff --git a/.deps/postgrex/lib/postgrex/extensions/uuid.ex b/.deps/postgrex/lib/postgrex/extensions/uuid.ex @@ -8,7 +8,7 @@ defmodule Postgrex.Extensions.UUID do def encode(_) do quote location: :keep, generated: true do uuid when is_binary(uuid) and byte_size(uuid) == 16 -> - [<<16::int32>> | uuid] + [<<16::int32()>> | uuid] other -> raise DBConnection.EncodeError, Postgrex.Utils.encode_msg(other, "a binary of 16 bytes") @@ -17,13 +17,13 @@ defmodule Postgrex.Extensions.UUID do def decode(:copy) do quote location: :keep do - <<16::int32, uuid::binary-16>> -> :binary.copy(uuid) + <<16::int32(), uuid::binary-16>> -> :binary.copy(uuid) end end def decode(:reference) do quote location: :keep do - <<16::int32, uuid::binary-16>> -> uuid + <<16::int32(), uuid::binary-16>> -> uuid end end end diff --git a/.deps/postgrex/lib/postgrex/extensions/void_binary.ex b/.deps/postgrex/lib/postgrex/extensions/void_binary.ex @@ -6,7 +6,7 @@ defmodule Postgrex.Extensions.VoidBinary do 
def encode(_) do quote location: :keep do :void -> - <<0::int32>> + <<0::int32()>> other -> raise DBConnection.EncodeError, Postgrex.Utils.encode_msg(other, "the atom :void") @@ -15,7 +15,7 @@ defmodule Postgrex.Extensions.VoidBinary do def decode(_) do quote location: :keep do - <<0::int32>> -> :void + <<0::int32()>> -> :void end end end diff --git a/.deps/postgrex/lib/postgrex/extensions/void_text.ex b/.deps/postgrex/lib/postgrex/extensions/void_text.ex @@ -12,7 +12,7 @@ defmodule Postgrex.Extensions.VoidText do def encode(_) do quote location: :keep do :void -> - <<0::int32>> + <<0::int32()>> other -> raise DBConnection.EncodeError, Postgrex.Utils.encode_msg(other, "the atom :void") @@ -21,7 +21,7 @@ defmodule Postgrex.Extensions.VoidText do def decode(_) do quote location: :keep do - <<0::int32>> -> :void + <<0::int32()>> -> :void end end end diff --git a/.deps/postgrex/lib/postgrex/extensions/xid8.ex b/.deps/postgrex/lib/postgrex/extensions/xid8.ex @@ -10,7 +10,7 @@ defmodule Postgrex.Extensions.Xid8 do quote location: :keep do int when int in unquote(range) -> - <<8::int32, int::uint64>> + <<8::int32(), int::uint64()>> other -> raise DBConnection.EncodeError, Postgrex.Utils.encode_msg(other, unquote(range)) @@ -19,7 +19,7 @@ defmodule Postgrex.Extensions.Xid8 do def decode(_) do quote location: :keep do - <<8::int32, int::uint64>> -> int + <<8::int32(), int::uint64()>> -> int end end end diff --git a/.deps/postgrex/lib/postgrex/messages.ex b/.deps/postgrex/lib/postgrex/messages.ex @@ -84,7 +84,7 @@ defmodule Postgrex.Messages do ### decoders ### # auth - def parse(<<type::int32, rest::binary>>, ?R, size) do + def parse(<<type::int32(), rest::binary>>, ?R, size) do type = decode_auth_type(type) data = @@ -115,12 +115,12 @@ defmodule Postgrex.Messages do end # backend_key - def parse(<<pid::int32, key::int32>>, ?K, _size) do + def parse(<<pid::int32(), key::int32()>>, ?K, _size) do msg_backend_key(pid: pid, key: key) end # ready - def parse(<<status::int8>>, ?Z, _size) do + def parse(<<status::int8()>>, ?Z, _size) do status = case status do ?I -> :idle @@ -132,16 +132,16 @@ defmodule Postgrex.Messages do end # parameter_desc - def parse(<<len::uint16, rest::binary(len, 32)>>, ?t, _size) do + def parse(<<len::uint16(), rest::binary(len, 32)>>, ?t, _size) do oids = for <<oid::size(32) <- rest>>, do: oid msg_parameter_desc(type_oids: oids) end - def parse(<<overflow_len::uint16, _::binary>>, ?t, size) do + def parse(<<overflow_len::uint16(), _::binary>>, ?t, size) do len = div(size - 2, 4) - case <<len::uint16>> do - <<^overflow_len::uint16>> -> + case <<len::uint16()>> do + <<^overflow_len::uint16()>> -> msg_too_many_parameters(len: len, max_len: 0xFFFF) _ -> @@ -150,18 +150,18 @@ defmodule Postgrex.Messages do end # row_desc - def parse(<<len::uint16, rest::binary>>, ?T, _size) do + def parse(<<len::uint16(), rest::binary>>, ?T, _size) do fields = decode_row_fields(rest, len) msg_row_desc(fields: fields) end # data_row - def parse(<<_::uint16, rest::binary>>, ?D, _size) do + def parse(<<_::uint16(), rest::binary>>, ?D, _size) do msg_data_row(values: rest) end # notify - def parse(<<pg_pid::int32, rest::binary>>, ?A, _size) do + def parse(<<pg_pid::int32(), rest::binary>>, ?A, _size) do {channel, rest} = decode_string(rest) {payload, ""} = decode_string(rest) msg_notify(pg_pid: pg_pid, channel: channel, payload: payload) @@ -262,9 +262,9 @@ defmodule Postgrex.Messages do size = IO.iodata_length(data) + 4 if first do - [first, <<size::int32>>, data] + [first, <<size::int32()>>, data] else 
- [<<size::int32>>, data] + [<<size::int32()>>, data] end end @@ -275,7 +275,7 @@ defmodule Postgrex.Messages do [acc, to_string(key), 0, value, 0] end) - vsn = <<@protocol_vsn_major::int16, @protocol_vsn_minor::int16>> + vsn = <<@protocol_vsn_major::int16(), @protocol_vsn_minor::int16()>> {nil, [vsn, params, 0]} end @@ -291,8 +291,8 @@ defmodule Postgrex.Messages do # parse defp encode(msg_parse(name: name, statement: statement, type_oids: oids)) do - oids = for oid <- oids, into: "", do: <<oid::uint32>> - len = <<div(byte_size(oids), 4)::int16>> + oids = for oid <- oids, into: "", do: <<oid::uint32()>> + len = <<div(byte_size(oids), 4)::int16()>> {?P, [name, 0, statement, 0, len, oids]} end @@ -333,19 +333,19 @@ defmodule Postgrex.Messages do result_formats: result_formats ) ) do - pfs = for format <- param_formats, into: "", do: <<format(format)::int16>> - rfs = for format <- result_formats, into: "", do: <<format(format)::int16>> + pfs = for format <- param_formats, into: "", do: <<format(format)::int16()>> + rfs = for format <- result_formats, into: "", do: <<format(format)::int16()>> - len_pfs = <<div(byte_size(pfs), 2)::int16>> - len_rfs = <<div(byte_size(rfs), 2)::int16>> - len_params = <<length(params)::int16>> + len_pfs = <<div(byte_size(pfs), 2)::int16()>> + len_rfs = <<div(byte_size(rfs), 2)::int16()>> + len_params = <<length(params)::int16()>> {?B, [port, 0, stat, 0, len_pfs, pfs, len_params, params, len_rfs, rfs]} end # execute defp encode(msg_execute(name_port: port, max_rows: rows)) do - {?E, [port, 0, <<rows::int32>>]} + {?E, [port, 0, <<rows::int32()>>]} end # sync @@ -360,12 +360,12 @@ defmodule Postgrex.Messages do # ssl_request defp encode(msg_ssl_request()) do - {nil, <<1234::int16, 5679::int16>>} + {nil, <<1234::int16(), 5679::int16()>>} end # cancel_request defp encode(msg_cancel_request(pid: pid, key: key)) do - {nil, <<1234::int16, 5678::int16, pid::int32, key::int32>>} + {nil, <<1234::int16(), 5678::int16(), pid::int32(), key::int32()>>} end # copy_data @@ -392,7 +392,7 @@ defmodule Postgrex.Messages do defp decode_fields(<<0>>), do: [] - defp decode_fields(<<field::int8, rest::binary>>) do + defp decode_fields(<<field::int8(), rest::binary>>) do type = decode_field_type(field) {string, rest} = decode_string(rest) [{type, string} | decode_fields(rest)] @@ -414,8 +414,8 @@ defmodule Postgrex.Messages do defp decode_row_field(rest) do {name, rest} = decode_string(rest) - <<table_oid::uint32, column::int16, type_oid::uint32, type_size::int16, type_mod::int32, - format::int16, rest::binary>> = rest + <<table_oid::uint32(), column::int16(), type_oid::uint32(), type_size::int16(), + type_mod::int32(), format::int16(), rest::binary>> = rest field = row_field( @@ -444,9 +444,9 @@ defmodule Postgrex.Messages do defp decode_format(0), do: :text defp decode_format(1), do: :binary - defp decode_copy(<<format::int8, len::uint16, rest::binary(len, 16)>>) do + defp decode_copy(<<format::int8(), len::uint16(), rest::binary(len, 16)>>) do format = decode_format(format) - columns = for <<column::uint16 <- rest>>, do: decode_format(column) + columns = for <<column::uint16() <- rest>>, do: decode_format(column) {format, columns} end end diff --git a/.deps/postgrex/lib/postgrex/protocol.ex b/.deps/postgrex/lib/postgrex/protocol.ex @@ -106,7 +106,8 @@ defmodule Postgrex.Protocol do prepare: prepare, messages: [], ssl: ssl?, - target_server_type: target_server_type + target_server_type: target_server_type, + search_path: opts[:search_path] } connect_endpoints(endpoints, sock_opts ++ 
@sock_opts, connect_timeout, s, status, []) @@ -830,7 +831,7 @@ defmodule Postgrex.Protocol do init_recv(%{s | connection_id: pid, connection_key: key}, status, buffer) {:ok, msg_ready(), buffer} -> - check_target_server_type(s, status, buffer) + set_search_path(s, status, buffer) {:ok, msg_error(fields: fields), buffer} -> disconnect(s, Postgrex.Error.exception(postgres: fields), buffer) @@ -844,6 +845,68 @@ defmodule Postgrex.Protocol do end end + ## set search path on connection startup + + defp set_search_path(s, %{search_path: nil} = status, buffer), + do: set_search_path_done(s, status, buffer) + + defp set_search_path(s, %{search_path: search_path} = status, buffer) + when is_list(search_path), + do: set_search_path_send(s, status, buffer) + + defp set_search_path(_, %{search_path: search_path}, _) do + raise ArgumentError, + "expected :search_path to be a list of strings, got: #{inspect(search_path)}" + end + + defp set_search_path_send(s, status, buffer) do + search_path = Enum.intersperse(status.search_path, ",") + msg = msg_query(statement: ["set search_path to " | search_path]) + + case msg_send(s, msg, buffer) do + :ok -> + set_search_path_recv(s, status, buffer) + + {:disconnect, _, _} = dis -> + dis + end + end + + defp set_search_path_recv(s, status, buffer) do + case msg_recv(s, :infinity, buffer) do + {:ok, msg_row_desc(fields: fields), buffer} -> + {[@text_type_oid], ["search_path"]} = columns(fields) + set_search_path_recv(s, status, buffer) + + {:ok, msg_data_row(), buffer} -> + set_search_path_recv(s, status, buffer) + + {:ok, msg_command_complete(), buffer} -> + set_search_path_recv(s, status, buffer) + + {:ok, msg_ready(status: :idle), buffer} -> + set_search_path_done(s, status, buffer) + + {:ok, msg_ready(status: postgres), _buffer} -> + err = %Postgrex.Error{message: "unexpected postgres status: #{postgres}"} + {:disconnect, err, s} + + {:ok, msg_error(fields: fields), buffer} -> + err = Postgrex.Error.exception(postgres: fields) + {:disconnect, err, %{s | buffer: buffer}} + + {:ok, msg, buffer} -> + {s, status} = handle_msg(s, status, msg) + set_search_path_recv(s, status, buffer) + + {:disconnect, _, _} = dis -> + dis + end + end + + defp set_search_path_done(s, status, buffer), + do: check_target_server_type(s, status, buffer) + ## check_target_server_type defp check_target_server_type(s, %{target_server_type: :any} = status, buffer), @@ -875,7 +938,7 @@ defmodule Postgrex.Protocol do check_target_server_type_recv(s, status, buffer) {:ok, msg_data_row(values: values), buffer} -> - <<len::uint32, read_only_value::binary(len)>> = values + <<len::uint32(), read_only_value::binary(len)>> = values actual_server_type = case read_only_value do @@ -3131,7 +3194,7 @@ defmodule Postgrex.Protocol do {:more, 0} end - defp msg_decode(<<type::int8, size::int32, rest::binary>>) do + defp msg_decode(<<type::int8(), size::int32(), rest::binary>>) do size = size - 4 case rest do diff --git a/.deps/postgrex/lib/postgrex/replication_connection.ex b/.deps/postgrex/lib/postgrex/replication_connection.ex @@ -550,7 +550,7 @@ defmodule Postgrex.ReplicationConnection do end defp stream_in_progress(command, mod, mod_state, from, s) do - Logger.warn("received #{command} while stream is already in progress") + Logger.warning("received #{command} while stream is already in progress") from && reply(from, {__MODULE__, :stream_in_progress}) {:noreply, %{s | state: {mod, mod_state}}} end diff --git a/.deps/postgrex/lib/postgrex/result.ex b/.deps/postgrex/lib/postgrex/result.ex @@ -28,11 +28,11 
@@ end if Code.ensure_loaded?(Table.Reader) do defimpl Table.Reader, for: Postgrex.Result do def init(%{columns: columns}) when columns in [nil, []] do - {:rows, %{columns: []}, []} + {:rows, %{columns: [], count: 0}, []} end def init(result) do - {:rows, %{columns: result.columns}, result.rows} + {:rows, %{columns: result.columns, count: result.num_rows}, result.rows} end end end diff --git a/.deps/postgrex/lib/postgrex/simple_connection.ex b/.deps/postgrex/lib/postgrex/simple_connection.ex @@ -263,7 +263,7 @@ defmodule Postgrex.SimpleConnection do idle_timeout = opts[:idle_timeout] if idle_timeout do - Logger.warn( + Logger.warning( ":idle_timeout in Postgrex.SimpleConnection is deprecated, " <> "please use :idle_interval instead" ) diff --git a/.deps/postgrex/lib/postgrex/type_module.ex b/.deps/postgrex/lib/postgrex/type_module.ex @@ -161,7 +161,7 @@ defmodule Postgrex.TypeModule do defp encode_tuple(tuple, n, [oid | oids], [type | types], acc) do param = :erlang.element(n, tuple) - acc = [acc, <<oid::uint32>> | encode_value(param, type)] + acc = [acc, <<oid::uint32()>> | encode_value(param, type)] encode_tuple(tuple, n + 1, oids, types, acc) end @@ -264,13 +264,13 @@ defmodule Postgrex.TypeModule do defp encode_null(extension, :super_binary) do quote do - defp unquote(extension)(@null, _sub_oids, _sub_types), do: <<-1::int32>> + defp unquote(extension)(@null, _sub_oids, _sub_types), do: <<-1::int32()>> end end defp encode_null(extension, _) do quote do - defp unquote(extension)(@null), do: <<-1::int32>> + defp unquote(extension)(@null), do: <<-1::int32()>> end end @@ -346,7 +346,7 @@ defmodule Postgrex.TypeModule do end defp decode_rows( - <<?D, size::int32, _::int16, unquote(rest)::binary>>, + <<?D, size::int32(), _::int16(), unquote(rest)::binary>>, rem, unquote(full), unquote(rows) @@ -360,9 +360,9 @@ defmodule Postgrex.TypeModule do end end - defp decode_rows(<<?D, size::int32, rest::binary>>, rem, _, rows) do + defp decode_rows(<<?D, size::int32(), rest::binary>>, rem, _, rows) do more = size + 1 - rem - {:more, [?D, <<size::int32>> | rest], rows, more} + {:more, [?D, <<size::int32()>> | rest], rows, more} end defp decode_rows(<<?D, rest::binary>>, _, _, rows) do @@ -506,7 +506,7 @@ defmodule Postgrex.TypeModule do end defp decode_tuple( - <<oid::int32, unquote(rest)::binary>>, + <<oid::int32(), unquote(rest)::binary>>, [oid | unquote(oids)], types, unquote(n), @@ -522,7 +522,7 @@ defmodule Postgrex.TypeModule do end defp decode_tuple( - <<oid::int32, unquote(rest)::binary>>, + <<oid::int32(), unquote(rest)::binary>>, rem, types, unquote(n), @@ -685,7 +685,7 @@ defmodule Postgrex.TypeModule do defp decode_extension_null(extension, dispatch, rest, acc, rem, full, rows) do quote do defp unquote(extension)( - <<-1::int32, unquote(rest)::binary>>, + <<-1::int32(), unquote(rest)::binary>>, types, acc, unquote(rem), @@ -699,7 +699,7 @@ defmodule Postgrex.TypeModule do end end - defp unquote(extension)(<<-1::int32, rest::binary>>, acc) do + defp unquote(extension)(<<-1::int32(), rest::binary>>, acc) do unquote(extension)(rest, [@null | acc]) end @@ -707,7 +707,7 @@ defmodule Postgrex.TypeModule do acc end - defp unquote(extension)(<<-1::int32, rest::binary>>, acc, callback) do + defp unquote(extension)(<<-1::int32(), rest::binary>>, acc, callback) do unquote(extension)(rest, [@null | acc], callback) end @@ -715,7 +715,7 @@ defmodule Postgrex.TypeModule do callback.(rest, acc) end - defp unquote(extension)(<<-1::int32, rest::binary>>, oids, types, n, acc) do + defp 
unquote(extension)(<<-1::int32(), rest::binary>>, oids, types, n, acc) do decode_tuple(rest, oids, types, n, acc) end end @@ -872,7 +872,7 @@ defmodule Postgrex.TypeModule do defp decode_super_null(extension, dispatch, rest, acc, rem, full, rows) do quote do defp unquote(extension)( - <<-1::int32, unquote(rest)::binary>>, + <<-1::int32(), unquote(rest)::binary>>, _sub_oids, _sub_types, types, @@ -888,7 +888,7 @@ defmodule Postgrex.TypeModule do end end - defp unquote(extension)(<<-1::int32, rest::binary>>, sub_oids, sub_types, acc) do + defp unquote(extension)(<<-1::int32(), rest::binary>>, sub_oids, sub_types, acc) do unquote(extension)(rest, sub_oids, sub_types, [@null | acc]) end @@ -897,7 +897,7 @@ defmodule Postgrex.TypeModule do end defp unquote(extension)( - <<-1::int32, rest::binary>>, + <<-1::int32(), rest::binary>>, _sub_oids, _sub_types, oids, diff --git a/.deps/postgrex/lib/postgrex/types.ex b/.deps/postgrex/lib/postgrex/types.ex @@ -201,11 +201,11 @@ defmodule Postgrex.Types do defp row_decode(<<>>), do: [] - defp row_decode(<<-1::int32, rest::binary>>) do + defp row_decode(<<-1::int32(), rest::binary>>) do [nil | row_decode(rest)] end - defp row_decode(<<len::uint32, value::binary(len), rest::binary>>) do + defp row_decode(<<len::uint32(), value::binary(len), rest::binary>>) do [value | row_decode(rest)] end diff --git a/.deps/postgrex/lib/postgrex/utils.ex b/.deps/postgrex/lib/postgrex/utils.ex @@ -83,17 +83,24 @@ defmodule Postgrex.Utils do """ @spec default_opts(Keyword.t()) :: Keyword.t() def default_opts(opts) do + {field, value} = extract_host(System.get_env("PGHOST")) + opts |> Keyword.put_new(:username, System.get_env("PGUSER") || System.get_env("USER")) |> Keyword.put_new(:password, System.get_env("PGPASSWORD")) |> Keyword.put_new(:database, System.get_env("PGDATABASE")) - |> Keyword.put_new(:hostname, System.get_env("PGHOST") || "localhost") + |> Keyword.put_new(field, value) |> Keyword.put_new(:port, System.get_env("PGPORT")) |> Keyword.update!(:port, &normalize_port/1) |> Keyword.put_new(:types, Postgrex.DefaultTypes) |> Enum.reject(fn {_k, v} -> is_nil(v) end) end + defp extract_host("/" <> _ = dir), do: {:socket_dir, dir} + defp extract_host(<<d, ?:>> <> _ = dir) when d in ?a..?z or d in ?A..?Z, do: {:socket_dir, dir} + defp extract_host("@" <> abstract_socket), do: {:socket, <<0>> <> abstract_socket} + defp extract_host(host), do: {:hostname, host || "localhost"} + defp normalize_port(port) when is_binary(port), do: String.to_integer(port) defp normalize_port(port), do: port diff --git a/.deps/postgrex/mix.exs b/.deps/postgrex/mix.exs @@ -2,7 +2,7 @@ defmodule Postgrex.Mixfile do use Mix.Project @source_url "https://github.com/elixir-ecto/postgrex" - @version "0.16.3" + @version "0.16.5" def project do [ diff --git a/mix.exs b/mix.exs @@ -55,7 +55,7 @@ end defp deps() do [ # db - {:ecto_sql, "~> 3.7"}, + {:ecto_sql, "~> 3.9"}, {:postgrex, ">= 0.0.0"}, # crypto diff --git a/mix.lock b/mix.lock @@ -12,8 +12,8 @@ "decimal": {:hex, :decimal, "2.0.0", "a78296e617b0f5dd4c6caf57c714431347912ffb1d0842e998e9792b5642d697", [:mix], [], "hexpm", "34666e9c55dea81013e77d9d87370fe6cb6291d1ef32f46a1600230b1d44f577"}, "dialyxir": {:hex, :dialyxir, "1.1.0", "c5aab0d6e71e5522e77beff7ba9e08f8e02bad90dfbeffae60eaf0cb47e29488", [:mix], [{:erlex, ">= 0.2.6", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "07ea8e49c45f15264ebe6d5b93799d4dd56a44036cf42d0ad9c960bc266c0b9a"}, "earmark_parser": {:hex, :earmark_parser, "1.4.25", 
"2024618731c55ebfcc5439d756852ec4e85978a39d0d58593763924d9a15916f", [:mix], [], "hexpm", "56749c5e1c59447f7b7a23ddb235e4b3defe276afc220a6227237f3efe83f51e"}, - "ecto": {:hex, :ecto, "3.8.4", "e06b8b87e62b27fea17fd2ff6041572ddd10339fd16cdf58446e402c6c90a74b", [:mix], [{:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "f9244288b8d42db40515463a008cf3f4e0e564bb9c249fe87bf28a6d79fe82d4"}, - "ecto_sql": {:hex, :ecto_sql, "3.8.3", "a7d22c624202546a39d615ed7a6b784580391e65723f2d24f65941b4dd73d471", [:mix], [{:db_connection, "~> 2.5 or ~> 2.4.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.8.4", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.6.0", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.15.0 or ~> 0.16.0 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "348cb17fb9e6daf6f251a87049eafcb57805e2892e5e6a0f5dea0985d367329b"}, + "ecto": {:hex, :ecto, "3.9.1", "67173b1687afeb68ce805ee7420b4261649d5e2deed8fe5550df23bab0bc4396", [:mix], [{:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "c80bb3d736648df790f7f92f81b36c922d9dd3203ca65be4ff01d067f54eb304"}, + "ecto_sql": {:hex, :ecto_sql, "3.9.0", "2bb21210a2a13317e098a420a8c1cc58b0c3421ab8e3acfa96417dab7817918c", [:mix], [{:db_connection, "~> 2.5 or ~> 2.4.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.9.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.6.0", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.16.0 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "a8f3f720073b8b1ac4c978be25fa7960ed7fd44997420c304a4a2e200b596453"}, "erlex": {:hex, :erlex, "0.2.6", "c7987d15e899c7a2f34f5420d2a2ea0d659682c06ac607572df55a43753aa12e", [:mix], [], "hexpm", "2ed2e25711feb44d52b17d2780eabf998452f6efda104877a3881c2f8c0c0c75"}, "ex_doc": {:hex, :ex_doc, "0.28.4", "001a0ea6beac2f810f1abc3dbf4b123e9593eaa5f00dd13ded024eae7c523298", [:mix], [{:earmark_parser, "~> 1.4.19", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_elixir, "~> 0.14", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1", [hex: :makeup_erlang, repo: "hexpm", optional: false]}], "hexpm", "bf85d003dd34911d89c8ddb8bda1a958af3471a274a4c2150a9c01c78ac3f8ed"}, "exsync": {:hex, :exsync, "0.2.4", "5cdc824553e0f4c4bf60018a9a6bbd5d3b51f93ef8401a0d8545f93127281d03", [:mix], [{:file_system, "~> 0.2", [hex: :file_system, repo: "hexpm", optional: false]}], "hexpm", "f7622d8bb98abbe473aa066ae46f91afdf7a5346b8b89728404f7189d2e80896"}, @@ -29,7 +29,7 @@ "plug": {:hex, :plug, "1.13.6", "187beb6b67c6cec50503e940f0434ea4692b19384d47e5fdfd701e93cadb4cc2", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2", [hex: :plug_crypto, repo: "hexpm", optional: 
diff --git a/.deps/postgrex/mix.exs b/.deps/postgrex/mix.exs
@@ -2,7 +2,7 @@ defmodule Postgrex.Mixfile do
   use Mix.Project
 
   @source_url "https://github.com/elixir-ecto/postgrex"
-  @version "0.16.3"
+  @version "0.16.5"
 
   def project do
     [
diff --git a/mix.exs b/mix.exs
@@ -55,7 +55,7 @@ end
   defp deps() do
     [
       # db
-      {:ecto_sql, "~> 3.7"},
+      {:ecto_sql, "~> 3.9"},
       {:postgrex, ">= 0.0.0"},
 
       # crypto
diff --git a/mix.lock b/mix.lock
@@ -12,8 +12,8 @@
   "decimal": {:hex, :decimal, "2.0.0", "a78296e617b0f5dd4c6caf57c714431347912ffb1d0842e998e9792b5642d697", [:mix], [], "hexpm", "34666e9c55dea81013e77d9d87370fe6cb6291d1ef32f46a1600230b1d44f577"},
   "dialyxir": {:hex, :dialyxir, "1.1.0", "c5aab0d6e71e5522e77beff7ba9e08f8e02bad90dfbeffae60eaf0cb47e29488", [:mix], [{:erlex, ">= 0.2.6", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "07ea8e49c45f15264ebe6d5b93799d4dd56a44036cf42d0ad9c960bc266c0b9a"},
   "earmark_parser": {:hex, :earmark_parser, "1.4.25", "2024618731c55ebfcc5439d756852ec4e85978a39d0d58593763924d9a15916f", [:mix], [], "hexpm", "56749c5e1c59447f7b7a23ddb235e4b3defe276afc220a6227237f3efe83f51e"},
-  "ecto": {:hex, :ecto, "3.8.4", "e06b8b87e62b27fea17fd2ff6041572ddd10339fd16cdf58446e402c6c90a74b", [:mix], [{:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "f9244288b8d42db40515463a008cf3f4e0e564bb9c249fe87bf28a6d79fe82d4"},
-  "ecto_sql": {:hex, :ecto_sql, "3.8.3", "a7d22c624202546a39d615ed7a6b784580391e65723f2d24f65941b4dd73d471", [:mix], [{:db_connection, "~> 2.5 or ~> 2.4.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.8.4", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.6.0", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.15.0 or ~> 0.16.0 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "348cb17fb9e6daf6f251a87049eafcb57805e2892e5e6a0f5dea0985d367329b"},
+  "ecto": {:hex, :ecto, "3.9.1", "67173b1687afeb68ce805ee7420b4261649d5e2deed8fe5550df23bab0bc4396", [:mix], [{:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "c80bb3d736648df790f7f92f81b36c922d9dd3203ca65be4ff01d067f54eb304"},
+  "ecto_sql": {:hex, :ecto_sql, "3.9.0", "2bb21210a2a13317e098a420a8c1cc58b0c3421ab8e3acfa96417dab7817918c", [:mix], [{:db_connection, "~> 2.5 or ~> 2.4.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.9.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.6.0", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.16.0 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "a8f3f720073b8b1ac4c978be25fa7960ed7fd44997420c304a4a2e200b596453"},
   "erlex": {:hex, :erlex, "0.2.6", "c7987d15e899c7a2f34f5420d2a2ea0d659682c06ac607572df55a43753aa12e", [:mix], [], "hexpm", "2ed2e25711feb44d52b17d2780eabf998452f6efda104877a3881c2f8c0c0c75"},
   "ex_doc": {:hex, :ex_doc, "0.28.4", "001a0ea6beac2f810f1abc3dbf4b123e9593eaa5f00dd13ded024eae7c523298", [:mix], [{:earmark_parser, "~> 1.4.19", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_elixir, "~> 0.14", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1", [hex: :makeup_erlang, repo: "hexpm", optional: false]}], "hexpm", "bf85d003dd34911d89c8ddb8bda1a958af3471a274a4c2150a9c01c78ac3f8ed"},
   "exsync": {:hex, :exsync, "0.2.4", "5cdc824553e0f4c4bf60018a9a6bbd5d3b51f93ef8401a0d8545f93127281d03", [:mix], [{:file_system, "~> 0.2", [hex: :file_system, repo: "hexpm", optional: false]}], "hexpm", "f7622d8bb98abbe473aa066ae46f91afdf7a5346b8b89728404f7189d2e80896"},
@@ -29,7 +29,7 @@
   "plug": {:hex, :plug, "1.13.6", "187beb6b67c6cec50503e940f0434ea4692b19384d47e5fdfd701e93cadb4cc2", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "02b9c6b9955bce92c829f31d6284bf53c591ca63c4fb9ff81dfd0418667a34ff"},
   "plug_cowboy": {:hex, :plug_cowboy, "2.5.2", "62894ccd601cf9597e2c23911ff12798a8a18d237e9739f58a6b04e4988899fe", [:mix], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:cowboy_telemetry, "~> 0.3", [hex: :cowboy_telemetry, repo: "hexpm", optional: false]}, {:plug, "~> 1.7", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "ea6e87f774c8608d60c8d34022a7d073bd7680a0a013f049fc62bf35efea1044"},
   "plug_crypto": {:hex, :plug_crypto, "1.2.2", "05654514ac717ff3a1843204b424477d9e60c143406aa94daf2274fdd280794d", [:mix], [], "hexpm", "87631c7ad914a5a445f0a3809f99b079113ae4ed4b867348dd9eec288cecb6db"},
-  "postgrex": {:hex, :postgrex, "0.16.3", "fac79a81a9a234b11c44235a4494d8565303fa4b9147acf57e48978a074971db", [:mix], [{:connection, "~> 1.1", [hex: :connection, repo: "hexpm", optional: false]}, {:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.5 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "aeaae1d2d1322da4e5fe90d241b0a564ce03a3add09d7270fb85362166194590"},
+  "postgrex": {:hex, :postgrex, "0.16.5", "fcc4035cc90e23933c5d69a9cd686e329469446ef7abba2cf70f08e2c4b69810", [:mix], [{:connection, "~> 1.1", [hex: :connection, repo: "hexpm", optional: false]}, {:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.5 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "edead639dc6e882618c01d8fc891214c481ab9a3788dfe38dd5e37fd1d5fb2e8"},
   "ranch": {:hex, :ranch, "1.8.0", "8c7a100a139fd57f17327b6413e4167ac559fbc04ca7448e9be9057311597a1d", [:make, :rebar3], [], "hexpm", "49fbcfd3682fab1f5d109351b61257676da1a2fdbe295904176d5e521a2ddfe5"},
   "telemetry": {:hex, :telemetry, "1.1.0", "a589817034a27eab11144ad24d5c0f9fab1f58173274b1e9bae7074af9cbee51", [:rebar3], [], "hexpm", "b727b2a1f75614774cff2d7565b64d0dfa5bd52ba517f16543e6fc7efcc0df48"},
 }
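
A closing note on the requirement bump in mix.exs above: "~> 3.9" accepts any
ecto_sql from 3.9.0 up to, but excluding, 4.0.0. The lock file then pins
ecto_sql 3.9.0, whose own "~> 3.9.0" constraint is what pulls in ecto 3.9.1.
A quick check of the operator's semantics with Elixir's Version module:

    Version.match?("3.9.0", "~> 3.9")   #=> true
    Version.match?("3.10.2", "~> 3.9")  #=> true
    Version.match?("4.0.0", "~> 3.9")   #=> false
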