Skip to content

Commit 4e0a1c0

Browse files
OutSquareCapital and evertlammerts
authored and committed
fixes:
- Builtins Literal had incorrect values for time/timestamp with time zone
- typo fixes
- renamed `DType` for Literals to `PyType` to keep the naming conventions consistent
1 parent e64e354 commit 4e0a1c0

File tree

4 files changed

+43
-42
lines changed

4 files changed

+43
-42
lines changed

_duckdb-stubs/__init__.pyi

Lines changed: 20 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -18,9 +18,9 @@ if typing.TYPE_CHECKING:
1818
IntoExprColumn,
1919
PythonLiteral,
2020
IntoValues,
21-
IntoDType,
21+
IntoPyType,
2222
IntoFields,
23-
StrIntoDType,
23+
StrIntoPyType,
2424
JoinType,
2525
JsonCompression,
2626
JsonFormat,
@@ -210,7 +210,7 @@ class DuckDBPyConnection:
210210
def __enter__(self) -> Self: ...
211211
def __exit__(self, exc_type: object, exc: object, traceback: object) -> None: ...
212212
def append(self, table_name: str, df: pandas.DataFrame, *, by_name: bool = False) -> DuckDBPyConnection: ...
213-
def array_type(self, type: IntoDType, size: typing.SupportsInt) -> sqltypes.DuckDBPyType: ...
213+
def array_type(self, type: IntoPyType, size: typing.SupportsInt) -> sqltypes.DuckDBPyType: ...
214214
def arrow(self, rows_per_batch: typing.SupportsInt = 1000000) -> pyarrow.lib.RecordBatchReader:
215215
"""Alias of to_arrow_reader(). We recommend using to_arrow_reader() instead."""
216216
...
@@ -224,8 +224,8 @@ class DuckDBPyConnection:
224224
self,
225225
name: str,
226226
function: Callable[..., PythonLiteral],
227-
parameters: lst[IntoDType] | None = None,
228-
return_type: IntoDType | None = None,
227+
parameters: lst[IntoPyType] | None = None,
228+
return_type: IntoPyType | None = None,
229229
*,
230230
type: func.PythonUDFType = ...,
231231
null_handling: func.FunctionNullHandling = ...,
@@ -235,7 +235,7 @@ class DuckDBPyConnection:
235235
def cursor(self) -> DuckDBPyConnection: ...
236236
def decimal_type(self, width: typing.SupportsInt, scale: typing.SupportsInt) -> sqltypes.DuckDBPyType: ...
237237
def df(self, *, date_as_object: bool = False) -> pandas.DataFrame: ...
238-
def dtype(self, type_str: StrIntoDType) -> sqltypes.DuckDBPyType: ...
238+
def dtype(self, type_str: StrIntoPyType) -> sqltypes.DuckDBPyType: ...
239239
def duplicate(self) -> DuckDBPyConnection: ...
240240
def enum_type(self, name: str, type: sqltypes.DuckDBPyType, values: lst[typing.Any]) -> sqltypes.DuckDBPyType: ...
241241
def execute(self, query: Statement | str, parameters: object = None) -> DuckDBPyConnection: ...
@@ -285,7 +285,7 @@ class DuckDBPyConnection:
285285
names: lst[str] | None = None,
286286
lineterminator: str | None = None,
287287
columns: ColumnsTypes | None = None,
288-
auto_type_candidates: lst[StrIntoDType] | None = None,
288+
auto_type_candidates: lst[StrIntoPyType] | None = None,
289289
max_line_size: int | None = None,
290290
ignore_errors: bool | None = None,
291291
store_rejects: bool | None = None,
@@ -344,9 +344,9 @@ class DuckDBPyConnection:
344344
def disable_profiling(self) -> None: ...
345345
def interrupt(self) -> None: ...
346346
def list_filesystems(self) -> lst[str]: ...
347-
def list_type(self, type: IntoDType) -> sqltypes.DuckDBPyType: ...
347+
def list_type(self, type: IntoPyType) -> sqltypes.DuckDBPyType: ...
348348
def load_extension(self, extension: str) -> None: ...
349-
def map_type(self, key: IntoDType, value: IntoDType) -> sqltypes.DuckDBPyType: ...
349+
def map_type(self, key: IntoPyType, value: IntoPyType) -> sqltypes.DuckDBPyType: ...
350350
@typing.overload
351351
def pl(
352352
self, rows_per_batch: typing.SupportsInt = 1000000, *, lazy: typing.Literal[False] = ...
@@ -386,7 +386,7 @@ class DuckDBPyConnection:
386386
names: lst[str] | None = None,
387387
lineterminator: str | None = None,
388388
columns: ColumnsTypes | None = None,
389-
auto_type_candidates: lst[StrIntoDType] | None = None,
389+
auto_type_candidates: lst[StrIntoPyType] | None = None,
390390
max_line_size: int | None = None,
391391
ignore_errors: bool | None = None,
392392
store_rejects: bool | None = None,
@@ -679,8 +679,8 @@ class DuckDBPyRelation:
679679
def rank_dense(self, window_spec: str, projected_columns: str = "") -> DuckDBPyRelation: ...
680680
def row_number(self, window_spec: str, projected_columns: str = "") -> DuckDBPyRelation: ...
681681
def select(self, *args: IntoExpr, groups: str = "") -> DuckDBPyRelation: ...
682-
def select_dtypes(self, types: lst[sqltypes.DuckDBPyType | StrIntoDType]) -> DuckDBPyRelation: ...
683-
def select_types(self, types: lst[sqltypes.DuckDBPyType | StrIntoDType]) -> DuckDBPyRelation: ...
682+
def select_dtypes(self, types: lst[sqltypes.DuckDBPyType | StrIntoPyType]) -> DuckDBPyRelation: ...
683+
def select_types(self, types: lst[sqltypes.DuckDBPyType | StrIntoPyType]) -> DuckDBPyRelation: ...
684684
def set_alias(self, alias: str) -> DuckDBPyRelation: ...
685685
def show(
686686
self,
@@ -1033,7 +1033,7 @@ def append(
10331033
table_name: str, df: pandas.DataFrame, *, by_name: bool = False, connection: DuckDBPyConnection | None = None
10341034
) -> DuckDBPyConnection: ...
10351035
def array_type(
1036-
type: IntoDType, size: typing.SupportsInt, *, connection: DuckDBPyConnection | None = None
1036+
type: IntoPyType, size: typing.SupportsInt, *, connection: DuckDBPyConnection | None = None
10371037
) -> sqltypes.DuckDBPyType: ...
10381038
@typing.overload
10391039
def arrow(
@@ -1062,8 +1062,8 @@ def connect(
10621062
def create_function(
10631063
name: str,
10641064
function: Callable[..., PythonLiteral],
1065-
parameters: lst[IntoDType] | None = None,
1066-
return_type: IntoDType | None = None,
1065+
parameters: lst[IntoPyType] | None = None,
1066+
return_type: IntoPyType | None = None,
10671067
*,
10681068
type: func.PythonUDFType = ...,
10691069
null_handling: func.FunctionNullHandling = ...,
@@ -1084,7 +1084,7 @@ def df(*, date_as_object: bool = False, connection: DuckDBPyConnection | None =
10841084
@typing.overload
10851085
def df(df: pandas.DataFrame, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyRelation: ...
10861086
def distinct(df: pandas.DataFrame, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyRelation: ...
1087-
def dtype(type_str: StrIntoDType, *, connection: DuckDBPyConnection | None = None) -> sqltypes.DuckDBPyType: ...
1087+
def dtype(type_str: StrIntoPyType, *, connection: DuckDBPyConnection | None = None) -> sqltypes.DuckDBPyType: ...
10881088
def duplicate(*, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection: ...
10891089
def enum_type(
10901090
name: str,
@@ -1172,7 +1172,7 @@ def from_csv_auto(
11721172
names: lst[str] | None = None,
11731173
lineterminator: str | None = None,
11741174
columns: ColumnsTypes | None = None,
1175-
auto_type_candidates: lst[StrIntoDType] | None = None,
1175+
auto_type_candidates: lst[StrIntoPyType] | None = None,
11761176
max_line_size: int | None = None,
11771177
ignore_errors: bool | None = None,
11781178
store_rejects: bool | None = None,
@@ -1246,10 +1246,10 @@ def get_profiling_information(*, connection: DuckDBPyConnection | None = None, f
12461246
def enable_profiling(*, connection: DuckDBPyConnection | None = None) -> None: ...
12471247
def disable_profiling(*, connection: DuckDBPyConnection | None = None) -> None: ...
12481248
def list_filesystems(*, connection: DuckDBPyConnection | None = None) -> lst[str]: ...
1249-
def list_type(type: IntoDType, *, connection: DuckDBPyConnection | None = None) -> sqltypes.DuckDBPyType: ...
1249+
def list_type(type: IntoPyType, *, connection: DuckDBPyConnection | None = None) -> sqltypes.DuckDBPyType: ...
12501250
def load_extension(extension: str, *, connection: DuckDBPyConnection | None = None) -> None: ...
12511251
def map_type(
1252-
key: IntoDType, value: IntoDType, *, connection: DuckDBPyConnection | None = None
1252+
key: IntoPyType, value: IntoPyType, *, connection: DuckDBPyConnection | None = None
12531253
) -> sqltypes.DuckDBPyType: ...
12541254
def order(
12551255
df: pandas.DataFrame, order_expr: str, *, connection: DuckDBPyConnection | None = None
@@ -1319,7 +1319,7 @@ def read_csv(
13191319
names: lst[str] | None = None,
13201320
lineterminator: str | None = None,
13211321
columns: ColumnsTypes | None = None,
1322-
auto_type_candidates: lst[StrIntoDType] | None = None,
1322+
auto_type_candidates: lst[StrIntoPyType] | None = None,
13231323
max_line_size: int | None = None,
13241324
ignore_errors: bool | None = None,
13251325
store_rejects: bool | None = None,

_duckdb-stubs/_expression.pyi

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
from typing import TYPE_CHECKING, Any, overload
22

33
if TYPE_CHECKING:
4-
from ._typing import IntoExpr, IntoDType
4+
from ._typing import IntoExpr, IntoPyType
55

66
class Expression:
77
def __add__(self, other: IntoExpr) -> Expression: ...
@@ -39,7 +39,7 @@ class Expression:
3939
def alias(self, name: str) -> Expression: ...
4040
def asc(self) -> Expression: ...
4141
def between(self, lower: IntoExpr, upper: IntoExpr) -> Expression: ...
42-
def cast(self, type: IntoDType) -> Expression: ...
42+
def cast(self, type: IntoPyType) -> Expression: ...
4343
def collate(self, collation: str) -> Expression: ...
4444
def desc(self) -> Expression: ...
4545
def get_name(self) -> str: ...

_duckdb-stubs/_sqltypes.pyi

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import duckdb
22
import typing
3-
from ._typing import StrIntoDType, DTypeIdentifiers
3+
from ._typing import StrIntoPyType, PyTypeIds
44

55
__all__: list[str] = [
66
"BIGINT",
@@ -40,13 +40,13 @@ class DuckDBPyType:
4040
def __getitem__(self, name: str) -> DuckDBPyType: ...
4141
def __hash__(self) -> int: ...
4242
@typing.overload
43-
def __init__(self, type_str: StrIntoDType, connection: duckdb.DuckDBPyConnection) -> None: ...
43+
def __init__(self, type_str: StrIntoPyType, connection: duckdb.DuckDBPyConnection) -> None: ...
4444
@typing.overload
4545
def __init__(self, obj: object) -> None: ...
4646
@property
4747
def children(self) -> list[tuple[str, DuckDBPyType | int | list[str]]]: ...
4848
@property
49-
def id(self) -> DTypeIdentifiers: ...
49+
def id(self) -> PyTypeIds: ...
5050

5151
BIGINT: DuckDBPyType # value = BIGINT
5252
BIT: DuckDBPyType # value = BIT

_duckdb-stubs/_typing.pyi

Lines changed: 18 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ class NPScalarTypeLike(NPProtocol, Protocol):
3636
class NPArrayLike(NPProtocol, Generic[_S_co, _D_co], Protocol):
3737
"""`numpy.ndarray` Protocol.
3838
39-
This is needed to accept numpy arrays as literals in expressions, without emitting type checker errors about unknown symbol if the user doesn't have numpy installed.
39+
This is needed to accept numpy arrays as literals in expressions, without emitting type checker errors about unknown symbol if the user doesn't have `numpy` installed.
4040
4141
Note:
4242
Using `np.typing.NDArray` is still the best option for return types.
@@ -73,7 +73,7 @@ NonNestedLiteral: TypeAlias = ScalarLiteral | TemporalLiteral | UUID | Decimal |
7373
NestedLiteral: TypeAlias = list[Any] | tuple[Any, ...] | dict[Any, Any] | NPArrayLike[Any, Any]
7474
"""Containers types that can be converted to a nested `ConstantExpression` (e.g. to `ARRAY` or `STRUCT`).
7575
76-
Those types can be aribtraly nested, as long as their leaf values are `PythonLiteral`."""
76+
Those types can be arbitrarily nested, as long as their leaf values are `PythonLiteral`."""
7777

7878
PythonLiteral: TypeAlias = NonNestedLiteral | NestedLiteral | None
7979
"""Python objects that can be converted to a `ConstantExpression`."""
@@ -89,8 +89,9 @@ See Also:
8989
"""
9090

9191
IntoValues: TypeAlias = list[PythonLiteral] | tuple[Expression, ...] | Expression
92-
"""Types that can be converted to a table."""
93-
# Datatypes conversions
92+
"""Types that can be converted to a table of values."""
93+
94+
# PyType conversions
9495

9596
Builtins: TypeAlias = Literal[
9697
"bigint",
@@ -106,12 +107,12 @@ Builtins: TypeAlias = Literal[
106107
"interval",
107108
"smallint",
108109
"null",
109-
"time_tz",
110+
"time with time zone",
110111
"time",
111112
"timestamp_ms",
112113
"timestamp_ns",
113114
"timestamp_s",
114-
"timestamp_tz",
115+
"timestamp with time zone",
115116
"timestamp",
116117
"tinyint",
117118
"ubigint",
@@ -122,7 +123,7 @@ Builtins: TypeAlias = Literal[
122123
"uuid",
123124
"varchar",
124125
]
125-
"""Literals strings convertibles into `DuckDBPyType` instances.
126+
"""Literals `str` that can be converted into `DuckDBPyType` instances.
126127
127128
Note:
128129
Passing the same values in uppercase is also accepted.
@@ -132,22 +133,22 @@ Note:
132133
NestedIds: TypeAlias = Literal["list", "struct", "array", "enum", "map", "decimal", "union"]
133134
"""Identifiers for nested types in `DuckDBPyType.id`."""
134135

135-
DTypeIdentifiers: TypeAlias = Builtins | NestedIds
136+
PyTypeIds: TypeAlias = Builtins | NestedIds
136137
"""All possible identifiers for `DuckDBPyType.id`."""
137138

138-
StrIntoDType = Builtins | Literal["json"] | str
139+
StrIntoPyType = Builtins | Literal["json"] | str
139140
"""Any `str` that can be converted into a `DuckDBPyType`.
140141
141-
The dtypes not present in the literal values are the composed ones, like `STRUCT` or `DECIMAL`.
142+
The pytypes not present in the literal values are the composed ones, like `STRUCT` or `DECIMAL`.
142143
143144
Note:
144145
A `StrEnum` will be handled the same way as a `str`."""
145146

146147
# NOTE:
147148
# the `dict` and `list` types are `Any` due to the same limitation mentionned in `NestedLiteral`.
148-
IntoDType: TypeAlias = (
149+
IntoPyType: TypeAlias = (
149150
DuckDBPyType
150-
| StrIntoDType
151+
| StrIntoPyType
151152
| type[NPScalarTypeLike]
152153
| type[ScalarLiteral]
153154
| type[list[Any]]
@@ -164,7 +165,7 @@ See Also:
164165

165166
# NOTE: here we keep the covariance "hack" and warn the user in the docstring,
166167
# because otherwise we can just resort to `Any` for the `dict` and `list` types.
167-
IntoFields: TypeAlias = Mapping[str, IntoDType] | Sequence[IntoDType]
168+
IntoFields: TypeAlias = Mapping[str, IntoPyType] | Sequence[IntoPyType]
168169
"""Types that can be converted either into:
169170
170171
- a nested `DuckDBPyType` (e.g. `STRUCT` or `UNION`)
@@ -177,9 +178,9 @@ Warning:
177178

178179
# Files related
179180

180-
# NOTE: ideally HiveTypes should also be accepted as a Mapping[str, StrIntoDType].
181-
ColumnsTypes: TypeAlias = Mapping[str, StrIntoDType]
182-
HiveTypes: TypeAlias = dict[str, StrIntoDType]
181+
# NOTE: ideally HiveTypes should also be accepted as a Mapping[str, StrIntoPyType].
182+
ColumnsTypes: TypeAlias = Mapping[str, StrIntoPyType]
183+
HiveTypes: TypeAlias = dict[str, StrIntoPyType]
183184
ParquetFieldIdsType: TypeAlias = Mapping[str, int | ParquetFieldIdsType]
184185

185186
_Auto: TypeAlias = Literal["auto"]
@@ -196,7 +197,7 @@ CsvEncoding: TypeAlias = Literal["utf-8", "utf-16", "latin-1"] | str
196197
All availables options not in the literal values can be seen here:
197198
https://duckdb.org/docs/stable/core_extensions/encodings
198199
"""
199-
JsonCompression: TypeAlias = _CompressionOptions | Literal["auto_detect"]
200+
JsonCompression: TypeAlias = Literal["auto_detect"] | _CompressionOptions
200201
JsonFormat: TypeAlias = _Auto | Literal["unstructured", "newline_delimited", "array"]
201202
JsonRecordOptions: TypeAlias = _Auto | Literal["true", "false"]
202203

0 commit comments

Comments
 (0)