From fc33e5c79908203a7b262e0c9ce4e1bafce090ad Mon Sep 17 00:00:00 2001 From: Martin Odersky Date: Fri, 26 Apr 2019 16:50:57 +0200 Subject: [PATCH] Trial: replace implied-for by instance-of --- .../contextual-instance/context-bounds.md | 30 ++ .../contextual-instance/conversions.md | 75 ++++ .../contextual-instance/derivation.md | 382 ++++++++++++++++++ .../contextual-instance/extension-methods.md | 150 +++++++ .../contextual-instance/import-implied.md | 53 +++ .../inferable-by-name-parameters.md | 66 +++ .../contextual-instance/inferable-params.md | 111 +++++ .../contextual-instance/instance-defs.md | 78 ++++ .../contextual-instance/motivation.md | 81 ++++ .../multiversal-equality.md | 217 ++++++++++ .../contextual-instance/query-types-spec.md | 79 ++++ .../contextual-instance/query-types.md | 160 ++++++++ .../relationship-implicits.md | 169 ++++++++ .../contextual-instance/typeclasses.md | 64 +++ 14 files changed, 1715 insertions(+) create mode 100644 docs/docs/reference/contextual-instance/context-bounds.md create mode 100644 docs/docs/reference/contextual-instance/conversions.md create mode 100644 docs/docs/reference/contextual-instance/derivation.md create mode 100644 docs/docs/reference/contextual-instance/extension-methods.md create mode 100644 docs/docs/reference/contextual-instance/import-implied.md create mode 100644 docs/docs/reference/contextual-instance/inferable-by-name-parameters.md create mode 100644 docs/docs/reference/contextual-instance/inferable-params.md create mode 100644 docs/docs/reference/contextual-instance/instance-defs.md create mode 100644 docs/docs/reference/contextual-instance/motivation.md create mode 100644 docs/docs/reference/contextual-instance/multiversal-equality.md create mode 100644 docs/docs/reference/contextual-instance/query-types-spec.md create mode 100644 docs/docs/reference/contextual-instance/query-types.md create mode 100644 docs/docs/reference/contextual-instance/relationship-implicits.md create mode 100644 
docs/docs/reference/contextual-instance/typeclasses.md diff --git a/docs/docs/reference/contextual-instance/context-bounds.md b/docs/docs/reference/contextual-instance/context-bounds.md new file mode 100644 index 000000000000..3458c5cf6cd1 --- /dev/null +++ b/docs/docs/reference/contextual-instance/context-bounds.md @@ -0,0 +1,30 @@ +--- +layout: doc-page +title: "Context Bounds" +--- + +## Context Bounds + +A context bound is a shorthand for expressing a common pattern of an inferable parameter that depends on a type parameter. Using a context bound, the `maximum` function of the last section can be written like this: +```scala +def maximum[T: Ord](xs: List[T]): T = xs.reduceLeft(max) +``` +A bound like `: Ord` on a type parameter `T` of a method or class indicates an inferable parameter `given Ord[T]`. The inferable parameter(s) generated from context bounds come last in the definition of the containing method or class. E.g., +```scala +def f[T: C1 : C2, U: C3](x: T) given (y: U, z: V): R +``` +would expand to +```scala +def f[T, U](x: T) given (y: U, z: V) given C1[T], C2[T], C3[U]: R +``` +Context bounds can be combined with subtype bounds. If both are present, subtype bounds come first, e.g. +```scala +def g[T <: B : C](x: T): R = ... +``` + +## Syntax + +``` +TypeParamBounds ::= [SubtypeBounds] {ContextBound} +ContextBound ::= ‘:’ Type +``` diff --git a/docs/docs/reference/contextual-instance/conversions.md b/docs/docs/reference/contextual-instance/conversions.md new file mode 100644 index 000000000000..31e61d928cbc --- /dev/null +++ b/docs/docs/reference/contextual-instance/conversions.md @@ -0,0 +1,75 @@ +--- +layout: doc-page +title: "Implicit Conversions" +--- + +Implicit conversions are defined by implicit instances of the `scala.Conversion` class. 
+This class is defined in package `scala` as follows: +```scala +abstract class Conversion[-T, +U] extends (T => U) +``` +For example, here is an implicit conversion from `String` to `Token`: +```scala +instance of Conversion[String, Token] { + def apply(str: String): Token = new KeyWord(str) +} +``` +Using an instance alias, this can be expressed more concisely as: +```scala +instance of Conversion[String, Token] = new KeyWord(_) +``` +An implicit conversion is applied automatically by the compiler in three situations: + +1. If an expression `e` has type `T`, and `T` does not conform to the expression's expected type `S`. +2. In a selection `e.m` with `e` of type `T`, but `T` defines no member `m`. +3. In an application `e.m(args)` with `e` of type `T`, if `T` does define + some member(s) named `m`, but none of these members can be applied to the arguments `args`. + +In the first case, the compiler looks for an implicit instance of class +`scala.Conversion` that maps an argument of type `T` to type `S`. In the second and third +case, it looks for an implicit instance of class `scala.Conversion` that maps an argument of type `T` +to a type that defines a member `m` which can be applied to `args` if present. +If such an instance `C` is found, the expression `e` is replaced by `C.apply(e)`. + +## Examples + +1. The `Predef` package contains "auto-boxing" conversions that map +primitive number types to subclasses of `java.lang.Number`. For instance, the +conversion from `Int` to `java.lang.Integer` can be defined as follows: +```scala +instance int2Integer of Conversion[Int, java.lang.Integer] = + java.lang.Integer.valueOf(_) +``` + +2. The "magnet" pattern is sometimes used to express many variants of a method. Instead of defining overloaded versions of the method, one can also let the method take one or more arguments of specially defined "magnet" types, into which various argument types can be converted. E.g. 
+```scala +object Completions { + + // The argument "magnet" type + enum CompletionArg { + case Error(s: String) + case Response(f: Future[HttpResponse]) + case Status(code: Future[StatusCode]) + } + object CompletionArg { + + // conversions defining the possible arguments to pass to `complete` + // these always come with CompletionArg + // They can be invoked explicitly, e.g. + // + // CompletionArg.fromStatusCode(statusCode) + + instance fromString of Conversion[String, CompletionArg] = Error(_) + instance fromFuture of Conversion[Future[HttpResponse], CompletionArg] = Response(_) + instance fromStatusCode of Conversion[Future[StatusCode], CompletionArg] = Status(_) + } + import CompletionArg._ + + def complete[T](arg: CompletionArg) = arg match { + case Error(s) => ... + case Response(f) => ... + case Status(code) => ... + } +} +``` +This setup is more complicated than simple overloading of `complete`, but it can still be useful if normal overloading is not available (as in the case above, since we cannot have two overloaded methods that take `Future[...]` arguments), or if normal overloading would lead to a combinatorial explosion of variants. diff --git a/docs/docs/reference/contextual-instance/derivation.md b/docs/docs/reference/contextual-instance/derivation.md new file mode 100644 index 000000000000..33a7516ad8a3 --- /dev/null +++ b/docs/docs/reference/contextual-instance/derivation.md @@ -0,0 +1,382 @@ +--- +layout: doc-page +title: Typeclass Derivation +--- + +Typeclass derivation is a way to generate instances of certain type classes automatically or with minimal code hints. A type class in this sense is any trait or class with a type parameter that describes the type being operated on. Commonly used examples are `Eql`, `Ordering`, `Show`, or `Pickling`. 
Example: +```scala +enum Tree[T] derives Eql, Ordering, Pickling { + case Branch(left: Tree[T], right: Tree[T]) + case Leaf(elem: T) +} +``` +The `derives` clause generates evidence for the `Eql`, `Ordering`, and `Pickling` traits in the companion object `Tree`: +```scala +evidence [T: Eql] for Eql[Tree[T]] = Eql.derived +evidence [T: Ordering] for Ordering[Tree[T]] = Ordering.derived +evidence [T: Pickling] for Pickling[Tree[T]] = Pickling.derived +``` + +### Deriving Types + +Besides `enums`, typeclasses can also be derived for other sets of classes and objects that form an algebraic data type. These are: + + - individual case classes or case objects + - sealed classes or traits that have only case classes and case objects as children. + + Examples: + + ```scala +case class Labelled[T](x: T, label: String) derives Eql, Show + +sealed trait Option[T] derives Eql +case class Some[T](x: T) extends Option[T] +case object None extends Option[Nothing] +``` + +The generated typeclass instances are placed in the companion objects `Labelled` and `Option`, respectively. + +### Derivable Types + +A trait or class can appear in a `derives` clause if its companion object defines a method named `derived`. The type and implementation of a `derived` method are arbitrary, but typically it has a definition like this: +```scala + def derived[T] given Generic[T] = ... +``` +That is, the `derived` method takes an inferable parameter of type `Generic` that determines the _shape_ of the deriving type `T` and it computes the typeclass implementation according to that shape. Evidence for `Generic` is generated automatically for any type that derives a typeclass with a `derived` +method that refers to `Generic`. One can also derive `Generic` alone, which means a `Generic` instance is generated without any other type class instances. E.g.: +```scala +sealed trait ParseResult[T] derives Generic +``` +This is all a user of typeclass derivation has to know. 
The rest of this page contains information needed to be able to write a typeclass that can appear in a `derives` clause. In particular, it details the means provided for the implementation of data generic `derived` methods. + +### The Shape Type + +For every class with a `derives` clause, the compiler computes the shape of that class as a type. For example, here is the shape type for the `Tree[T]` enum: +```scala +Cases[( + Case[Branch[T], (Tree[T], Tree[T])], + Case[Leaf[T], T *: Unit] +)] +``` +Informally, this states that + +> The shape of a `Tree[T]` is one of two cases: Either a `Branch[T]` with two + elements of type `Tree[T]`, or a `Leaf[T]` with a single element of type `T`. + +The type constructors `Cases` and `Case` come from the companion object of a class +`scala.compiletime.Shape`, which is defined in the standard library as follows: +```scala +sealed abstract class Shape + +object Shape { + + /** A sum with alternative types `Alts` */ + case class Cases[Alts <: Tuple] extends Shape + + /** A product type `T` with element types `Elems` */ + case class Case[T, Elems <: Tuple] extends Shape +} +``` + +Here is the shape type for `Labelled[T]`: +```scala +Case[Labelled[T], (T, String)] +``` +And here is the one for `Option[T]`: +```scala +Cases[( + Case[Some[T], T *: Unit], + Case[None.type, Unit] +)] +``` +Note that an empty element tuple is represented as type `Unit`. A single-element tuple +is represented as `T *: Unit` since there is no direct syntax for such tuples: `(T)` is just `T` in parentheses, not a tuple. + +### The Generic Typeclass + +For every class `C[T_1,...,T_n]` with a `derives` clause, the compiler generates in the companion object of `C` evidence for `Generic[C[T_1,...,T_n]]` that follows the outline below: +```scala +evidence [T_1, ..., T_n] for Generic[C[T_1,...,T_n]] { + type Shape = ... + ... +} +``` +where the right hand side of `Shape` is the shape type of `C[T_1,...,T_n]`. 
+For instance, the definition +```scala +enum Result[+T, +E] derives Logging { + case class Ok[T](result: T) + case class Err[E](err: E) +} +``` +would produce: +```scala +object Result { + import scala.compiletime.Shape._ + + evidence [T, E] for Generic[Result[T, E]] { + type Shape = Cases[( + Case[Ok[T], T *: Unit], + Case[Err[E], E *: Unit] + )] + ... + } +} +``` +The `Generic` class is defined in package `scala.reflect`. + +```scala +abstract class Generic[T] { + type Shape <: scala.compiletime.Shape + + /** The mirror corresponding to ADT instance `x` */ + def reflect(x: T): Mirror + + /** The ADT instance corresponding to given `mirror` */ + def reify(mirror: Mirror): T + + /** The companion object of the ADT */ + def common: GenericClass +} +``` +It defines the `Shape` type for the ADT `T`, as well as two methods that map between a +type `T` and a generic representation of `T`, which we call a `Mirror`: +The `reflect` method maps an instance value of the ADT `T` to its mirror whereas +the `reify` method goes the other way. There's also a `common` method that returns +a value of type `GenericClass` which contains information that is the same for all +instances of a class (right now, this consists of the runtime `Class` value and +the names of the cases and their parameters). + +### Mirrors + +A mirror is a generic representation of an instance value of an ADT. `Mirror` objects have three components: + + - `adtClass: GenericClass`: The representation of the ADT class + - `ordinal: Int`: The ordinal number of the case among all cases of the ADT, starting from 0 + - `elems: Product`: The elements of the instance, represented as a `Product`. 
+ + The `Mirror` class is defined in package `scala.reflect` as follows: + +```scala +class Mirror(val adtClass: GenericClass, val ordinal: Int, val elems: Product) { + + /** The `n`'th element of this generic case */ + def apply(n: Int): Any = elems.productElement(n) + + /** The name of the constructor of the case reflected by this mirror */ + def caseLabel: String = adtClass.label(ordinal)(0) + + /** The label of the `n`'th element of the case reflected by this mirror */ + def elementLabel(n: Int): String = adtClass.label(ordinal)(n + 1) +} +``` + +### GenericClass + +Here's the API of `scala.reflect.GenericClass`: + +```scala +class GenericClass(val runtimeClass: Class[_], labelsStr: String) { + + /** A mirror of case with ordinal number `ordinal` and elements as given by `Product` */ + def mirror(ordinal: Int, product: Product): Mirror = + new Mirror(this, ordinal, product) + + /** A mirror with elements given as an array */ + def mirror(ordinal: Int, elems: Array[AnyRef]): Mirror = + mirror(ordinal, new ArrayProduct(elems)) + + /** A mirror with an initial empty array of `numElems` elements, to be filled in. */ + def mirror(ordinal: Int, numElems: Int): Mirror = + mirror(ordinal, new Array[AnyRef](numElems)) + + /** A mirror of a case with no elements */ + def mirror(ordinal: Int): Mirror = + mirror(ordinal, EmptyProduct) + + /** Case and element labels as a two-dimensional array. + * Each row of the array contains a case label, followed by the labels of the elements of that case. + */ + val label: Array[Array[String]] = ... +} +``` + +The class provides four overloaded methods to create mirrors. The first of these is invoked by the `reflect` method that maps an ADT instance to its mirror. It simply passes the +instance itself (which is a `Product`) to the second parameter of the mirror. That operation does not involve any copying and is thus quite efficient. 
The second and third versions of `mirror` are typically invoked by typeclass methods that create instances from mirrors. An example would be an `unpickle` method that first creates an array of elements, then creates +a mirror over that array, and finally uses the `reify` method in `Generic` to create the ADT instance. The fourth version of `mirror` is used to create mirrors of instances that do not have any elements. + +### How to Write Generic Typeclasses + +Based on the machinery developed so far it becomes possible to define type classes generically. This means that the `derived` method will compute a type class instance for any ADT that has a `Generic` instance, recursively. +The implementation of these methods typically uses three new type-level constructs in Dotty: inline methods, inline matches, and implicit matches. As an example, here is one possible implementation of a generic `Eql` type class, with explanations. Let's assume `Eql` is defined by the following trait: +```scala +trait Eql[T] { + def eql(x: T, y: T): Boolean +} +``` +We need to implement a method `Eql.derived` that produces an instance of `Eql[T]` provided +there exists evidence of type `Generic[T]`. Here's a possible solution: +```scala + inline def derived[T] given (ev: Generic[T]): Eql[T] = new Eql[T] { + def eql(x: T, y: T): Boolean = { + val mx = ev.reflect(x) // (1) + val my = ev.reflect(y) // (2) + inline erasedValue[ev.Shape] match { + case _: Cases[alts] => + mx.ordinal == my.ordinal && // (3) + eqlCases[alts](mx, my, 0) // [4] + case _: Case[_, elems] => + eqlElems[elems](mx, my, 0) // [5] + } + } + } +``` +The implementation of the inline method `derived` creates an instance of `Eql[T]` and implements its `eql` method. The right-hand side of `eql` mixes compile-time and runtime elements. In the code above, runtime elements are marked with a number in parentheses, i.e. +`(1)`, `(2)`, `(3)`. Compile-time calls that expand to runtime code are marked with a number in brackets, i.e. 
`[4]`, `[5]`. The implementation of `eql` consists of the following steps. + + 1. Map the compared values `x` and `y` to their mirrors using the `reflect` method of the implicitly passed `Generic` evidence `(1)`, `(2)`. + 2. Match at compile-time against the shape of the ADT given in `ev.Shape`. Dotty does not have a construct for matching types directly, but we can emulate it using an `inline` match over an `erasedValue`. Depending on the actual type `ev.Shape`, the match will reduce at compile time to one of its two alternatives. + 3. If `ev.Shape` is of the form `Cases[alts]` for some tuple `alts` of alternative types, the equality test consists of comparing the ordinal values of the two mirrors `(3)` and, if they are equal, comparing the elements of the case indicated by that ordinal value. That second step is performed by code that results from the compile-time expansion of the `eqlCases` call `[4]`. + 4. If `ev.Shape` is of the form `Case[elems]` for some tuple `elems` for element types, the elements of the case are compared by code that results from the compile-time expansion of the `eqlElems` call `[5]`. + +Here is a possible implementation of `eqlCases`: +```scala + inline def eqlCases[Alts <: Tuple](mx: Mirror, my: Mirror, n: Int): Boolean = + inline erasedValue[Alts] match { + case _: (Shape.Case[_, elems] *: alts1) => + if (mx.ordinal == n) // (6) + eqlElems[elems](mx, my, 0) // [7] + else + eqlCases[alts1](mx, my, n + 1) // [8] + case _: Unit => + throw new MatchError(mx.ordinal) // (9) + } +``` +The inline method `eqlCases` takes as type arguments the alternatives of the ADT that remain to be tested. It takes as value arguments mirrors of the two instances `x` and `y` to be compared and an integer `n` that indicates the ordinal number of the case that is tested next. It produces an expression that compares these two values. 
+ +If the list of alternatives `Alts` consists of a case of type `Case[_, elems]`, possibly followed by further cases in `alts1`, we generate the following code: + + 1. Compare the `ordinal` value of `mx` (a runtime value) with the case number `n` (a compile-time value translated to a constant in the generated code) in an if-then-else `(6)`. + 2. In the then-branch of the conditional we have that the `ordinal` value of both mirrors + matches the number of the case with elements `elems`. Proceed by comparing the elements + of the case in code expanded from the `eqlElems` call `[7]`. + 3. In the else-branch of the conditional we have that the present case does not match + the ordinal value of both mirrors. Proceed by trying the remaining cases in `alts1` using + code expanded from the `eqlCases` call `[8]`. + + If the list of alternatives `Alts` is the empty tuple, there are no further cases to check. + This place in the code should not be reachable at runtime. Therefore an appropriate + implementation is by throwing a `MatchError` or some other runtime exception `(9)`. + +The `eqlElems` method compares the elements of two mirrors that are known to have the same +ordinal number, which means they represent the same case of the ADT. Here is a possible +implementation: +```scala + inline def eqlElems[Elems <: Tuple](xs: Mirror, ys: Mirror, n: Int): Boolean = + inline erasedValue[Elems] match { + case _: (elem *: elems1) => + tryEql[elem]( // [12] + xs(n).asInstanceOf[elem], // (10) + ys(n).asInstanceOf[elem]) && // (11) + eqlElems[elems1](xs, ys, n + 1) // [13] + case _: Unit => + true // (14) + } +``` +`eqlElems` takes as arguments the two mirrors of the elements to compare and a compile-time index `n`, indicating the index of the next element to test. It is defined in terms of another compile-time match, this time over the tuple type `Elems` of all element types that remain to be tested. 
If that type is +non-empty, say of form `elem *: elems1`, the following code is produced: + + 1. Access the `n`'th elements of both mirrors and cast them to the current element type `elem` + `(10)`, `(11)`. Note that because of the way runtime reflection mirrors compile-time `Shape` types, the casts are guaranteed to succeed. + 2. Compare the element values using code expanded by the `tryEql` call `[12]`. + 3. "And" the result with code that compares the remaining elements using a recursive call + to `eqlElems` `[13]`. + + If type `Elems` is empty, there are no more elements to be compared, so the comparison's result is `true`. `(14)` + + Since `eqlElems` is an inline method, its recursive calls are unrolled. The end result is a conjunction `test_1 && ... && test_n && true` of test expressions produced by the `tryEql` calls. + +The last, and in a sense most interesting part of the derivation is the comparison of a pair of element values in `tryEql`. Here is the definition of this method: +```scala + inline def tryEql[T](x: T, y: T) = implicit match { + case ev: Eql[T] => + ev.eql(x, y) // (15) + case _ => + error("No `Eql` instance was found for $T") + } +``` +`tryEql` is an inline method that takes an element type `T` and two element values of that type as arguments. It is defined using an `evidence match` that tries to find evidence for `Eql[T]`. If an instance `ev` is found, it proceeds by comparing the arguments using `ev.eql`. On the other hand, if no instance is found +this signals a compilation error: the user tried a generic derivation of `Eql` for a class with an element type that does not support an `Eql` instance itself. The error is signaled by +calling the `error` method defined in `scala.compiletime`. + +**Note:** At the moment our error diagnostics for metaprogramming does not support yet interpolated string arguments for the `scala.compiletime.error` method that is called in the second case above. 
As an alternative, one can simply leave off the second case, then a missing typeclass would result in a "failure to reduce match" error. + +**Example:** Here is a slightly polished and compacted version of the code that's generated by inline expansion for the derived `Eql` instance of class `Tree`. + +```scala +evidence [T] given (elemEq: Eql[T]) for Eql[Tree[T]] { + def eql(x: Tree[T], y: Tree[T]): Boolean = { + val ev = the[Generic[Tree[T]]] + val mx = ev.reflect(x) + val my = ev.reflect(y) + mx.ordinal == my.ordinal && { + if (mx.ordinal == 0) { + this.eql(mx(0).asInstanceOf[Tree[T]], my(0).asInstanceOf[Tree[T]]) && + this.eql(mx(1).asInstanceOf[Tree[T]], my(1).asInstanceOf[Tree[T]]) + } + else if (mx.ordinal == 1) { + elemEq.eql(mx(0).asInstanceOf[T], my(0).asInstanceOf[T]) + } + else throw new MatchError(mx.ordinal) + } + } +} +``` + +One important difference between this approach and Scala-2 typeclass derivation frameworks such as Shapeless or Magnolia is that no automatic attempt is made to generate typeclass instances of elements recursively using the generic derivation framework. There must be an evidence value of type `Eql[T]` (which can of course be produced in turn using `Eql.derived`), or the compilation will fail. The advantage of this more restrictive approach to typeclass derivation is that it avoids uncontrolled transitive typeclass derivation by design. This keeps code sizes smaller, compile times lower, and is generally more predictable. + +### Derived Instances Elsewhere + +Sometimes one would like to derive a typeclass instance for an ADT after the ADT is defined, without being able to change the code of the ADT itself. +To do this, simply define an instance with the `derived` method of the typeclass as right-hand side. E.g., to implement `Ordering` for `Option`, define: +```scala +evidence [T: Ordering] for Ordering[Option[T]] = Ordering.derived +``` +Usually, the `Ordering.derived` clause has an inferable parameter of type +`Generic[Option[T]]`. 
Since the `Option` trait has a `derives` clause, +the necessary evidence is already present in the companion object of `Option`. +If the ADT in question does not have a `derives` clause, evidence for `Generic` +would still be synthesized by the compiler at the point where `derived` is called. +This is similar to the situation with type tags or class tags: If no evidence is found, the compiler will synthesize it. + +### Syntax + +``` +Template ::= InheritClauses [TemplateBody] +EnumDef ::= id ClassConstr InheritClauses EnumBody +InheritClauses ::= [‘extends’ ConstrApps] [‘derives’ QualId {‘,’ QualId}] +ConstrApps ::= ConstrApp {‘with’ ConstrApp} + | ConstrApp {‘,’ ConstrApp} +``` + +### Discussion + +The typeclass derivation framework is quite small and low-level. There are essentially +two pieces of infrastructure in the compiler-generated `Generic` instances: + + - a type representing the shape of an ADT, + - a way to map between ADT instances and generic mirrors. + +Generic mirrors make use of the already existing `Product` infrastructure for case +classes, which means they are efficient and their generation requires not much code. + +Generic mirrors can be so simple because, just like `Product`s, they are weakly +typed. On the other hand, this means that code for generic typeclasses has to +ensure that type exploration and value selection proceed in lockstep and it +has to assert this conformance in some places using casts. If generic typeclasses +are correctly written these casts will never fail. + +It could make sense to explore a higher-level framework that encapsulates all casts +in the framework. This could give more guidance to the typeclass implementer. +It also seems quite possible to put such a framework on top of the lower-level +mechanisms presented here. 
diff --git a/docs/docs/reference/contextual-instance/extension-methods.md b/docs/docs/reference/contextual-instance/extension-methods.md new file mode 100644 index 000000000000..779b0b2a47ea --- /dev/null +++ b/docs/docs/reference/contextual-instance/extension-methods.md @@ -0,0 +1,150 @@ +--- +layout: doc-page +title: "Extension Methods" +--- + +Extension methods allow one to add methods to a type after the type is defined. Example: + +```scala +case class Circle(x: Double, y: Double, radius: Double) + +def (c: Circle) circumference: Double = c.radius * math.Pi * 2 +``` + +Like regular methods, extension methods can be invoked with infix `.`: + +```scala + val circle = Circle(0, 0, 1) + circle.circumference +``` + +### Translation of Extension Methods + +Extension methods are methods that have a parameter clause in front of the defined +identifier. They translate to methods where the leading parameter section is moved +to after the defined identifier. So, the definition of `circumference` above translates +to the plain method, and can also be invoked as such: +```scala +def circumference(c: Circle): Double = c.radius * math.Pi * 2 + +assert(circle.circumference == circumference(circle)) +``` + +### Translation of Calls to Extension Methods + +When is an extension method applicable? There are two possibilities. + + - An extension method is applicable if it is visible under a simple name, by being defined + or inherited or imported in a scope enclosing the application. + - An extension method is applicable if it is a member of some evidence value at the point of the application. + +As an example, consider an extension method `longestStrings` on `String` defined in a trait `StringSeqOps`. 
+ +```scala +trait StringSeqOps { + def (xs: Seq[String]) longestStrings = { + val maxLength = xs.map(_.length).max + xs.filter(_.length == maxLength) + } +} +``` +We can make the extension method available by defining evidence for `StringSeqOps`, like this: +```scala +instance ops1 of StringSeqOps +``` +Then +```scala +List("here", "is", "a", "list").longestStrings +``` +is legal everywhere `ops1` is available as evidence. Alternatively, we can define `longestStrings` as a member of a normal object. But then the method has to be brought into scope to be usable as an extension method. + +```scala +object ops2 extends StringSeqOps +import ops2.longestStrings +List("here", "is", "a", "list").longestStrings +``` +The precise rules for resolving a selection to an extension method are as follows. + +Assume a selection `e.m[Ts]` where `m` is not a member of `e`, where the type arguments `[Ts]` are optional, +and where `T` is the expected type. The following two rewritings are tried in order: + + 1. The selection is rewritten to `m[Ts](e)`. + 2. If the first rewriting does not typecheck with expected type `T`, and there is evidence `i` + in either the current scope or in the evidence scope of `T`, and `i` defines an extension + method named `m`, then selection is expanded to `i.m[Ts](e)`. + This second rewriting is attempted at the time where the compiler also tries an implicit conversion + from `T` to a type containing `m`. If there is more than one way of rewriting, an ambiguity error results. + +So `circle.circumference` translates to `CircleOps.circumference(circle)`, provided +`circle` has type `Circle` and `CircleOps` is an eligible evidence value (i.e. it is visible at the point of call or it is defined in the companion object of `Circle`). + +### Evidence for Extension Methods + +Evidence that defines extension methods can also be defined without a `for` clause. 
E.g., + +```scala +evidence StringOps { + def (xs: Seq[String]) longestStrings: Seq[String] = { + val maxLength = xs.map(_.length).max + xs.filter(_.length == maxLength) + } +} + +evidence { + def (xs: List[T]) second[T] = xs.tail.head +} +``` +If such an evidence is anonymous (as in the second example above), its name is synthesized from the name +of the first defined extension method. + +### Operators + +The extension method syntax also applies to the definition of operators. +In each case the definition syntax mirrors the way the operator is applied. +Examples: +```scala + def (x: String) < (y: String) = ... + def (x: Elem) +: (xs: Seq[Elem]) = ... + + "ab" < "c" + 1 +: List(2, 3) +``` +The two definitions above translate to +```scala + def < (x: String)(y: String) = ... + def +: (xs: Seq[Elem])(x: Elem) = ... +``` +Note the swap of the two parameters `x` and `xs` when translating +the right-binding operator `+:` to an extension method. This is analogous +to the implementation of right binding operators as normal methods. + +### Generic Extensions + +The `StringSeqOps` examples extended a specific instance of a generic type. It is also possible to extend a generic type by adding type parameters to an extension method. Examples: + +```scala +def (xs: List[T]) second [T] = + xs.tail.head + +def (xs: List[List[T]]) flattened [T] = + xs.foldLeft[List[T]](Nil)(_ ++ _) + +def (x: T) + [T : Numeric](y: T): T = + the[Numeric[T]].plus(x, y) +``` + +As usual, type parameters of the extension method follow the defined method name. Nevertheless, such type parameters can already be used in the preceding parameter clause. + + +### Syntax + +The required syntax extension just adds one clause for extension methods relative +to the [current syntax](https://github.com/lampepfl/dotty/blob/master/docs/docs/internals/syntax.md). +``` +DefSig ::= ... 
+ | ‘(’ DefParam ‘)’ [nl] id [DefTypeParamClause] DefParamClauses +``` + + + + diff --git a/docs/docs/reference/contextual-instance/import-implied.md b/docs/docs/reference/contextual-instance/import-implied.md new file mode 100644 index 000000000000..b4e7cc723620 --- /dev/null +++ b/docs/docs/reference/contextual-instance/import-implied.md @@ -0,0 +1,53 @@ +--- +layout: doc-page +title: "Instance Imports" +--- + +A special form of import is used to import implicit instances. Example: +```scala +object A { + class TC + instance tc of TC + def f given TC = ??? +} +object B { + import A._ + import instance A._ +} +``` +In the code above, the `import A._` clause of object `B` will import all members +of `A` _except_ the instance `tc`. Conversely, the second import `import instance A._` will import _only_ that instance. + +Generally, a normal import clause brings all members except implicit instance values into scope whereas an `import instance` clause brings only implicit instance values into scope. + +There are two main benefits arising from these rules: + + - It is made clearer where instance values in scope are coming from. + In particular, it is not possible to hide imported instance values + in a long list of regular imports. + - It enables importing all instance values + without importing anything else. This is particularly important since implicit + instances can be anonymous, so the usual recourse of using named imports is not + practical. + +### Relationship with Old-Style Implicits + +The rules of evidence imports above have the consequence that a library +would have to migrate in lockstep with all its users from old style implicit definitions and +normal imports to evidence definitions and evidence imports. + +The following modifications avoid this hurdle to migration. + + 1. An evidence import also brings old style implicits into scope. 
So, in Scala 3.0
+ an old-style implicit definition can be brought into scope either by a normal or
+ by an evidence import.
+
+ 2. In Scala 3.1, old-style implicits accessed implicitly through a normal import
+ will give a deprecation warning.
+
+ 3. In some version after 3.1, old-style implicits accessed implicitly through a normal import
+ will give a compiler error.
+
+These rules mean that library users can use `import instance` to access old-style implicits in Scala 3.0,
+and will be gently nudged and then forced to do so in later versions. Libraries can then switch to
+evidence definitions once their user base has migrated. diff --git a/docs/docs/reference/contextual-instance/inferable-by-name-parameters.md b/docs/docs/reference/contextual-instance/inferable-by-name-parameters.md new file mode 100644 index 000000000000..48e8939b62ad --- /dev/null +++ b/docs/docs/reference/contextual-instance/inferable-by-name-parameters.md @@ -0,0 +1,66 @@
+---
+layout: doc-page
+title: "Implicit By-Name Parameters"
+---
+
+Inferable by-name parameters can be used to avoid a divergent inferred expansion. Example:
+
+```scala
+trait Codec[T] {
+  def write(x: T): Unit
+}
+
+instance intCodec of Codec[Int] = ???
+
+instance optionCodec[T] given (ev: => Codec[T]) of Codec[Option[T]] {
+  def write(xo: Option[T]) = xo match {
+    case Some(x) => ev.write(x)
+    case None =>
+  }
+}
+
+val s = the[Codec[Option[Int]]]
+
+s.write(Some(33))
+s.write(None)
+```
+As is the case for a normal by-name parameter, the argument for the inferable parameter `ev`
+is evaluated on demand. In the example above, if the option value `x` is `None`, it is
+not evaluated at all.
+
+The synthesized argument for an inferable parameter is backed by a local val
+if this is necessary to prevent an otherwise diverging expansion.
+
+The precise steps for constructing an inferable argument for a by-name parameter of type `=> T` are as follows.
+
+ 1.
Create a new implicit instance of type `T`: + + ```scala + instance lv of T = ??? + ``` + where `lv` is an arbitrary fresh name. + + 1. This instance is not immediately available as candidate for argument inference (making it immediately available could result in a loop in the synthesized computation). But it becomes available in all nested contexts that look again for an inferred argument to a by-name parameter. + + 1. If this search succeeds with expression `E`, and `E` contains references to the evidence `lv`, replace `E` by + + + ```scala + { instance lv of T = E; lv } + ``` + + Otherwise, return `E` unchanged. + +In the example above, the definition of `s` would be expanded as follows. + +```scala +val s = the[Test.Codec[Option[Int]]]( + optionCodec[Int](intCodec)) +``` + +No local instance was generated because the synthesized argument is not recursive. + +### Reference + +For more info, see [Issue #1998](https://github.com/lampepfl/dotty/issues/1998) +and the associated [Scala SIP](https://docs.scala-lang.org/sips/byname-implicits.html). diff --git a/docs/docs/reference/contextual-instance/inferable-params.md b/docs/docs/reference/contextual-instance/inferable-params.md new file mode 100644 index 000000000000..92b79c6823ce --- /dev/null +++ b/docs/docs/reference/contextual-instance/inferable-params.md @@ -0,0 +1,111 @@ +--- +layout: doc-page +title: "Given Clauses" +--- + +Functional programming tends to express most dependencies as simple function parameterization. +This is clean and powerful, but it sometimes leads to functions that take many parameters and +call trees where the same value is passed over and over again in long call chains to many +functions. Given clauses can help here since they enable the compiler to synthesize +repetitive arguments instead of the programmer having to write them explicitly. 
+
+For example, given the [instance definitions](./instance-defs.html) of the previous section,
+a maximum function that works for any arguments for which an ordering exists can be defined as follows:
+```scala
+def max[T](x: T, y: T) given (ord: Ord[T]): T =
+  if (ord.compare(x, y) < 1) y else x
+```
+Here, the part following `given` introduces a constraint that `T` is ordered, or, otherwise put, that an implicit instance for `Ord[T]` exists.
+That instance is passed as an _implicit parameter_ to the method. Inside the method, the implicit instance can be accessed under the name `ord`.
+
+The `max` method can be applied as follows:
+```scala
+max(2, 3) given IntOrd
+```
+The `given IntOrd` part establishes `IntOrd` as the instance to satisfy the constraint `Ord[Int]`.
+It does this by providing the `IntOrd` value as an argument for the implicit `ord` parameter.
+But the point of implicit parameters is that this argument can also be left out (and it usually is).
+So the following applications are equally valid:
+```scala
+max(2, 3)
+max(List(1, 2, 3), Nil)
+```
+
+## Anonymous Inferable Parameters
+
+In many situations, the name of an implicit parameter of a method need not be mentioned explicitly at all,
+since it is only used as a synthesized instance for other constraints. In that case one can avoid defining
+a parameter name and just provide its type. Example:
+```scala
+def maximum[T](xs: List[T]) given Ord[T]: T =
+  xs.reduceLeft(max)
+```
+`maximum` takes an implicit parameter of type `Ord` only to pass it on as an implicit argument to `max`. The name of the parameter is left out.
+
+Generally, implicit parameters may be given either as a parameter list `(p_1: T_1, ..., p_n: T_n)` or as a sequence of types, separated by commas.
+ +## Inferring Complex Arguments + +Here are two other methods that require implicits of type `Ord[T]`: +```scala +def descending[T] given (asc: Ord[T]): Ord[T] = new Ord[T] { + def compare(x: T, y: T) = asc.compare(y, x) +} + +def minimum[T](xs: List[T]) given Ord[T] = + maximum(xs) given descending +``` +The `minimum` method's right hand side passes `descending` as an explicit argument to `maximum(xs)`. +With this setup, the following calls are all well-formed, and they all normalize to the last one: +```scala +minimum(xs) +maximum(xs) given descending +maximum(xs) given (descending given ListOrd) +maximum(xs) given (descending given (ListOrd given IntOrd)) +``` + +## Mixing Inferable And Normal Parameters + +Inferable parameters can be freely mixed with normal parameters. +An inferable parameter may be followed by a normal parameter and _vice versa_. +There can be several inferable parameter lists in a definition. Example: +```scala +def f given (u: Universe) (x: u.T) given Context = ... + +instance global of Universe { type T = String ... } +instance ctx of Context { ... } +``` +Then the following calls are all valid (and normalize to the last one) +```scala +f("abc") +(f given global)("abc") +f("abc") given ctx +(f given global)("abc") given ctx +``` + +## Summoning Instances + +A method `the` in `Predef` summons the implicit instance for a given type. For example, the instance for `Ord[List[Int]]` is generated by +```scala +the[Ord[List[Int]]] // reduces to ListOrd given IntOrd +``` +The `the` method is simply defined as the (non-widening) identity function over an implicit parameter. +```scala +def the[T] given (x: T): x.type = x +``` +Functions like `the` that have only implicit parameters are also called _context queries_. + +## Syntax + +Here is the new syntax of parameters and arguments seen as a delta from the [standard context free syntax of Scala 3](http://dotty.epfl.ch/docs/internals/syntax.html). +``` +ClsParamClause ::= ... 
+ | ‘given’ (‘(’ [ClsParams] ‘)’ | GivenTypes) +DefParamClause ::= ... + | GivenParamClause +GivenParamClause ::= ‘given’ (‘(’ DefParams ‘)’ | GivenTypes) +GivenTypes ::= AnnotType {‘,’ AnnotType} + +InfixExpr ::= ... + | InfixExpr ‘given’ (InfixExpr | ParArgumentExprs) +``` diff --git a/docs/docs/reference/contextual-instance/instance-defs.md b/docs/docs/reference/contextual-instance/instance-defs.md new file mode 100644 index 000000000000..229d448b81e6 --- /dev/null +++ b/docs/docs/reference/contextual-instance/instance-defs.md @@ -0,0 +1,78 @@ +--- +layout: doc-page +title: "Instance Definitions" +--- + +Instance definitions define "canonical" values of given types +that can be synthesized by the compiler. Typically, such values are +used as implicit arguments for constraints in [given clauses](./inferable-params.html). Example: + +```scala +trait Ord[T] { + def compare(x: T, y: T): Int + def (x: T) < (y: T) = compare(x, y) < 0 + def (x: T) > (y: T) = compare(x, y) > 0 +} + +instance IntOrd of Ord[Int] { + def compare(x: Int, y: Int) = + if (x < y) -1 else if (x > y) +1 else 0 +} + +instance ListOrd[T] given (ord: Ord[T]) of Ord[List[T]] { + def compare(xs: List[T], ys: List[T]): Int = (xs, ys) match { + case (Nil, Nil) => 0 + case (Nil, _) => -1 + case (_, Nil) => +1 + case (x :: xs1, y :: ys1) => + val fst = ord.compare(x, y) + if (fst != 0) fst else xs1.compareTo(ys1) + } +} +``` +This code defines a trait `Ord` and two instance definitions. `IntOrd` defines +an implicit instance of type `Ord[Int]` whereas `ListOrd[T]` defines implicit instances of type `Ord[List[T]]` +for all types `T` that come with an instance for `Ord[T]` themselves. +The `given` clause in `ListOrd` defines an [implicit parameter](./inferable-params.html). +Given clauses are further explained in the next section. + +## Anonymous Instance Definitions + +The name of an implicit instance can be left out. 
So the instance definitions +of the last section can also be expressed like this: +```scala +instance of Ord[Int] { ... } +instance [T] given (ord: Ord[T]) of Ord[List[T]] { ... } +``` +If a name is not given, the compiler will synthesize one from the type(s) in the `for` clause. + +## instance Aliases + +An instance alias defines an implicit instance that is equal to some expression. E.g., assuming a global method `currentThreadPool` returning a value with a member `context`, one could define: +```scala +instance ctx of ExecutionContext = currentThreadPool().context +``` +This creates an implicit instance `ctx` of type `ExecutionContext` that resolves to the right hand side `currentThreadPool().context`. +Each time an instance for `ExecutionContext` is demanded, the result of evaluating the right-hand side expression is returned. + +Alias instances may be anonymous, e.g. +```scala +instance of Position = enclosingTree.position +``` +An instance alias can have type and context parameters just like any other instance definition, but it can only implement a single type. + +## Syntax + +Here is the new syntax of instance definitions, seen as a delta from the [standard context free syntax of Scala 3](http://dotty.epfl.ch/docs/internals/syntax.html). +``` +TmplDef ::= ... + | ‘instance’ InstanceDef +InstanceDef ::= [id] InstanceParams InstanceBody +InstanceParams ::= [DefTypeParamClause] {GivenParamClause} +GivenParamClause ::= ‘given’ (‘(’ [DefParams] ‘)’ | GivenTypes) +InstanceBody ::= [‘for’ ConstrApp {‘,’ ConstrApp }] [TemplateBody] + | ‘for’ Type ‘=’ Expr +GivenTypes ::= AnnotType {‘,’ AnnotType} +``` +The identifier `id` can be omitted only if either the `for` part or the template body is present. +If the `for` part is missing, the template body must define at least one extension method. 
diff --git a/docs/docs/reference/contextual-instance/motivation.md b/docs/docs/reference/contextual-instance/motivation.md new file mode 100644 index 000000000000..366e78130c64 --- /dev/null +++ b/docs/docs/reference/contextual-instance/motivation.md @@ -0,0 +1,81 @@ +--- +layout: doc-page +title: "Overview" +--- + +### Critique of the Status Quo + +Scala's implicits are its most distinguished feature. They are _the_ fundamental way to abstract over context. They represent a unified paradigm with a great variety of use cases, among them: implementing type classes, establishing context, dependency injection, expressing capabilities, computing new types and proving relationships between them. + +Following Haskell, Scala was the second popular language to have some form of implicits. Other languages have followed suit. E.g Rust's traits or Swift's protocol extensions. Design proposals are also on the table for Kotlin as [compile time dependency resolution](https://github.com/Kotlin/KEEP/blob/e863b25f8b3f2e9b9aaac361c6ee52be31453ee0/proposals/compile-time-dependency-resolution.md), for C# as [Shapes and Extensions](https://github.com/dotnet/csharplang/issues/164) +or for F# as [Traits](https://github.com/MattWindsor91/visualfsharp/blob/hackathon-vs/examples/fsconcepts.md). Implicits are also a common feature of theorem provers such as Coq or Agda. + +Even though these designs use widely different terminology, they are all variants of the core idea of _term inference_. Given a type, the compiler synthesizes a "canonical" term that has that type. Scala embodies the idea in a purer form than most other languages: An implicit parameter directly leads to an inferred argument term that could also be written down explicitly. By contrast, typeclass based designs are less direct since they hide term inference behind some form of type classification and do not offer the option of writing the inferred quantities (typically, dictionaries) explicitly. 
+ +Given that term inference is where the industry is heading, and given that Scala has it in a very pure form, how come implicits are not more popular? In fact, it's fair to say that implicits are at the same time Scala's most distinguished and most controversial feature. I believe this is due to a number of aspects that together make implicits harder to learn than necessary and also make it harder to prevent abuses. + +Particular criticisms are: + +1. Being very powerful, implicits are easily over-used and mis-used. This observation holds in almost all cases when we talk about _implicit conversions_, which, even though conceptually different, share the same syntax with other implicit definitions. For instance, regarding the two definitions + + ```scala + implicit def i1(implicit x: T): C[T] = ... + implicit def i2(x: T): C[T] = ... + ``` + + the first of these is a conditional implicit _value_, the second an implicit _conversion_. Conditional implicit values are a cornerstone for expressing type classes, whereas most applications of implicit conversions have turned out to be of dubious value. The problem is that many newcomers to the language start with defining implicit conversions since they are easy to understand and seem powerful and convenient. Scala 3 will put under a language flag both definitions and applications of "undisciplined" implicit conversions between types defined elsewhere. This is a useful step to push back against overuse of implicit conversions. But the problem remains that syntactically, conversions and values just look too similar for comfort. + + 2. Another widespread abuse is over-reliance on implicit imports. This often leads to inscrutable type errors that go away with the right import incantation, leaving a feeling of frustration. Conversely, it is hard to see what implicits a program uses since implicits can hide anywhere in a long list of imports. + + 3. The syntax of implicit definitions is too minimal. 
It consists of a single modifier, `implicit`, that can be attached to a large number of language constructs. A problem with this for newcomers is that it conveys mechanism instead of intent. For instance, a typeclass instance is an implicit object or val if unconditional and an implicit def with implicit parameters referring to some class if conditional. This describes precisely what the implicit definitions translate to -- just drop the `implicit` modifier, and that's it! But the cues that define intent are rather indirect and can be easily misread, as demonstrated by the definitions of `i1` and `i2` above. + + 4. The syntax of implicit parameters also has shortcomings. It starts with the position of `implicit` as a pseudo-modifier that applies to a whole parameter section instead of a single parameter. This represents an irregular case wrt to the rest of Scala's syntax. Furthermore, while implicit _parameters_ are designated specifically, arguments are not. Passing an argument to an implicit parameter looks like a regular application `f(arg)`. This is problematic because it means there can be confusion regarding what parameter gets instantiated in a call. For instance, in + ```scala + def currentMap(implicit ctx: Context): Map[String, Int] + ``` + one cannot write `currentMap("abc")` since the string "abc" is taken as explicit argument to the implicit `ctx` parameter. One has to write `currentMap.apply("abc")` instead, which is awkward and irregular. For the same reason, a method definition can only have one implicit parameter section and it must always come last. This restriction not only reduces orthogonality, but also prevents some useful program constructs, such as a method with a regular parameter whose type depends on an implicit value. Finally, it's also a bit annoying that implicit parameters must have a name, even though in many cases that name is never referenced. + + 5. Implicits pose challenges for tooling. 
The set of available implicits depends on context, so command completion has to take context into account. This is feasible in an IDE but docs like ScalaDoc that are based static web pages can only provide an approximation. Another problem is that failed implicit searches often give very unspecific error messages, in particular if some deeply recursive implicit search has failed. Note that the Dotty compiler already implements some improvements in this case, but challenges still remain. + +None of the shortcomings is fatal, after all implicits are very widely used, and many libraries and applications rely on them. But together, they make code using implicits a lot more cumbersome and less clear than it could be. + +Historically, many of these shortcomings come from the way implicits were gradually "discovered" in Scala. Scala originally had only implicit conversions with the intended use case of "extending" a class or trait after it was defined, i.e. what is expressed by implicit classes in later versions of Scala. Implicit parameters and instance definitions came later in 2006 and picked similar syntax since it seemed convenient. For the same reason, no effort was made to distinguish implicit imports or arguments from normal ones. + +Existing Scala programmers by and large have gotten used to the status quo and see little need for change. But for newcomers this status quo presents a big hurdle. I believe if we want to overcome that hurdle, we should take a step back and allow ourselves to consider a radically new design. + +### The New Design + +The following pages introduce a redesign of contextual abstractions in Scala. They introduce four fundamental changes: + + 1. [Instance Definitions](./instance-defs.html) are a new way to define inferable terms. They replace implicit definitions. 
The core principle of the proposal is that, rather than mixing the `implicit` modifier with a large number of features, we have a single way to define terms that can be synthesized for types.
+
+ 2. [Given Clauses](./inferable-params.html) are a new syntax for implicit _parameters_ and their _arguments_. Both are introduced with the same keyword, `given`. This unambiguously aligns parameters and arguments, solving a number of language warts.
+
+ 3. [Instance Imports](./import-implied.html) are a new form of import that specifically imports implicit definitions and nothing else. New-style instance definitions _must be_ imported with `import instance`; a plain import will no longer bring them into scope. Old-style implicit definitions can be imported with either form.
+
+ 4. [Implicit Conversions](./conversions.html) are now expressed as implicit instances of a standard `Conversion` class. All other forms of implicit conversions will be phased out.
+
+This section also contains pages describing other language features that are related to context abstraction. These are:
+
+ - [Context Bounds](./context-bounds.html), which carry over unchanged.
+ - [Extension Methods](./extension-methods.html) replace implicit classes in a way that integrates better with typeclasses.
+ - [Implementing Typeclasses](./typeclasses.html) demonstrates how some common typeclasses can be implemented using the new constructs.
+ - [Typeclass Derivation](./derivation.html) introduces constructs to automatically derive typeclasses for ADTs.
+ - [Multiversal Equality](./multiversal-equality.html) introduces a special typeclass
+   to support type safe equality.
+ - [Context Queries](./query-types.html) _aka_ implicit function types introduce a way to abstract over implicit parameterization.
+ - [Inferable By-Name Parameters](./inferable-by-name-parameters.html) are an essential tool to define recursive implicits without looping.
+ - [Relationship with Scala 2 Implicits](./relationship-implicits.html) discusses the relationship between old-style and new-style implicits and how to migrate from one to the other.
+
+Overall, the new design achieves a better separation of term inference from the rest of the language: There is a single way to define implicit instances instead of a multitude of forms all taking an `implicit` modifier. There is a single way to introduce implicit parameters and arguments instead of conflating implicit with normal arguments. There is a separate way to import implicit instances that does not allow hiding them in a sea of normal imports. And there is a single way to define an implicit conversion which is clearly marked as such and does not require special syntax.
+
+This design thus avoids feature interactions and makes the language more consistent and orthogonal. It will make implicits easier to learn and harder to abuse. It will greatly improve the clarity of the 95% of Scala programs that use implicits. It has thus the potential to fulfil the promise of term inference in a principled way that is also accessible and friendly.
+
+Could we achieve the same goals by tweaking existing implicits? After having tried for a long time, I believe now that this is impossible.
+
+ - First, some of the problems are clearly syntactic and require different syntax to solve them.
+ - Second, there is the problem of how to migrate. We cannot change the rules in mid-flight. At some stage of language evolution we need to accommodate both the new and the old rules. With a syntax change, this is easy: Introduce the new syntax with new rules, support the old syntax for a while to facilitate cross compilation, deprecate and phase out the old syntax at some later time. Keeping the same syntax does not offer this path, and in fact does not seem to offer any viable path for evolution.
+ - Third, even if we somehow succeeded with migration, we still have the problem
+   of how to teach this.
We cannot make existing tutorials go away. Almost all existing tutorials start with implicit conversions, which will go away; they use normal imports, which will go away, and they explain calls to methods with implicit parameters by expanding them to plain applications, which will also go away. This means that we'd have + to add modifications and qualifications to all existing literature and courseware, likely causing more confusion with beginners instead of less. By contrast, with a new syntax there is a clear criterion: Any book or courseware that mentions `implicit` is outdated and should be updated. + diff --git a/docs/docs/reference/contextual-instance/multiversal-equality.md b/docs/docs/reference/contextual-instance/multiversal-equality.md new file mode 100644 index 000000000000..cd9523c38a8e --- /dev/null +++ b/docs/docs/reference/contextual-instance/multiversal-equality.md @@ -0,0 +1,217 @@ +--- +layout: doc-page +title: "Multiversal Equality" +--- + +Previously, Scala had universal equality: Two values of any types +could be compared with each other with `==` and `!=`. This came from +the fact that `==` and `!=` are implemented in terms of Java's +`equals` method, which can also compare values of any two reference +types. + +Universal equality is convenient. But it is also dangerous since it +undermines type safety. For instance, let's assume one is left after some refactoring +with an erroneous program where a value `y` has type `S` instead of the correct type `T`. + +```scala +val x = ... // of type T +val y = ... // of type S, but should be T +x == y // typechecks, will always yield false +``` + +If all the program does with `y` is compare it to other values of type `T`, the program will still typecheck, since values of all types can be compared with each other. +But it will probably give unexpected results and fail at runtime. + +Multiversal equality is an opt-in way to make universal equality +safer. 
It uses a binary typeclass `Eql` to indicate that values of +two given types can be compared with each other. +The example above would not typecheck if `S` or `T` was a class +that derives `Eql`, e.g. +```scala +class T derives Eql +``` +Alternatively, one can also provide the derived evidence directly, like this: +```scala +instance of Eql[T, T] = Eql.derived +``` +This definition effectively says that values of type `T` can (only) be +compared to other values of type `T` when using `==` or `!=`. The definition +affects type checking but it has no significance for runtime +behavior, since `==` always maps to `equals` and `!=` always maps to +the negation of `equals`. The right hand side `Eql.derived` of the definition +is a value that has any `Eql` instance as its type. Here is the definition of class +`Eql` and its companion object: +```scala +package scala +import annotation.implicitNotFound + +@implicitNotFound("Values of types ${L} and ${R} cannot be compared with == or !=") +sealed trait Eql[-L, -R] + +object Eql { + object derived extends Eql[Any, Any] +} +``` + +One can have several `Eql` instances for a type. For example, the four +definitions below make values of type `A` and type `B` comparable with +each other, but not comparable to anything else: + +```scala +instance of Eql[A, A] = Eql.derived +instance of Eql[B, B] = Eql.derived +instance of Eql[A, B] = Eql.derived +instance of Eql[B, A] = Eql.derived +``` +The `scala.Eql` object defines a number of `Eql` instances that together +define a rule book for what standard types can be compared (more details below). + +There's also a "fallback" instance named `eqlAny` that allows comparisons +over all types that do not themselves have an `Eql` instance. 
`eqlAny` is
+defined as follows:
+
+```scala
+def eqlAny[L, R]: Eql[L, R] = Eql.derived
+```
+
+Even though `eqlAny` is not declared as an `instance`, the compiler will still
+construct an `eqlAny` instance as an answer to an implicit search for the
+type `Eql[L, R]`, unless `L` or `R` have `Eql` instances
+defined on them, or the language feature `strictEquality` is enabled.
+
+The primary motivation for having `eqlAny` is backwards compatibility;
+if this is of no concern, one can disable `eqlAny` by enabling the language
+feature `strictEquality`. As for all language features this can be either
+done with an import
+
+```scala
+import scala.language.strictEquality
+```
+or with a command line option `-language:strictEquality`.
+
+## Deriving Eql Instances
+
+Instead of defining `Eql` instances directly, it is often more convenient to derive them. Example:
+```scala
+class Box[T](x: T) derives Eql
+```
+By the usual rules of [typeclass derivation](./derivation.html),
+this generates the following `Eql` instance in the companion object of `Box`:
+```scala
+instance [T, U] given Eql[T, U] of Eql[Box[T], Box[U]] = Eql.derived
+```
+That is, two boxes are comparable with `==` or `!=` if their elements are. Examples:
+```scala
+new Box(1) == new Box(1L) // ok since there is an instance of `Eql[Int, Long]`
+new Box(1) == new Box("a") // error: can't compare
+new Box(1) == 1 // error: can't compare
+```
+
+## Precise Rules for Equality Checking
+
+The precise rules for equality checking are as follows.
+
+If the `strictEquality` feature is enabled then
+a comparison using `x == y` or `x != y` between values `x: T` and `y: U`
+is legal if
+
+ 1. there is an instance of `Eql[T, U]`, or
+ 2. one of `T`, `U` is `Null`.
+
+In the default case where the `strictEquality` feature is not enabled the comparison is
+also legal if
+
+ 1. `T` and `U` are the same, or
+ 2. one of `T` and `U` is a subtype of the _lifted_ version of the other type, or
+ 3.
neither `T` nor `U` have a _reflexive `Eql` instance_.
+
+Explanations:
+
+ - _lifting_ a type `S` means replacing all references to abstract types
+   in covariant positions of `S` by their upper bound, and replacing
+   all refinement types in covariant positions of `S` by their parent.
+ - a type `T` has a _reflexive `Eql` instance_ if the implicit search for `Eql[T, T]`
+   succeeds.
+
+## Predefined Eql Instances
+
+The `Eql` object defines implicit instances for
+ - the primitive types `Byte`, `Short`, `Char`, `Int`, `Long`, `Float`, `Double`, `Boolean`, and `Unit`,
+ - `java.lang.Number`, `java.lang.Boolean`, and `java.lang.Character`,
+ - `scala.collection.Seq`, and `scala.collection.Set`.
+
+Instances are defined so that every one of these types has a reflexive `Eql` instance, and the following holds:
+
+ - Primitive numeric types can be compared with each other.
+ - Primitive numeric types can be compared with subtypes of `java.lang.Number` (and _vice versa_).
+ - `Boolean` can be compared with `java.lang.Boolean` (and _vice versa_).
+ - `Char` can be compared with `java.lang.Character` (and _vice versa_).
+ - Two sequences (of arbitrary subtypes of `scala.collection.Seq`) can be compared
+   with each other if their element types can be compared. The two sequence types
+   need not be the same.
+ - Two sets (of arbitrary subtypes of `scala.collection.Set`) can be compared
+   with each other if their element types can be compared. The two set types
+   need not be the same.
+ - Any subtype of `AnyRef` can be compared with `Null` (and _vice versa_).
+
+## Why Two Type Parameters?
+
+One particular feature of the `Eql` type is that it takes _two_ type parameters, representing the types of the two items to be compared. By contrast, conventional
+implementations of an equality type class take only a single type parameter which represents the common type of _both_ operands. One type parameter is simpler than two, so why go through the additional complication?
The reason has to do with the fact that, rather than coming up with a type class where no operation existed before, +we are dealing with a refinement of pre-existing, universal equality. It's best illustrated through an example. + +Say you want to come up with a safe version of the `contains` method on `List[T]`. The original definition of `contains` in the standard library was: +```scala +class List[+T] { + ... + def contains(x: Any): Boolean +} +``` +That uses universal equality in an unsafe way since it permits arguments of any type to be compared with the list's elements. The "obvious" alternative definition +```scala + def contains(x: T): Boolean +``` +does not work, since it refers to the covariant parameter `T` in a nonvariant context. The only variance-correct way to use the type parameter `T` in `contains` is as a lower bound: +```scala + def contains[U >: T](x: U): Boolean +``` +This generic version of `contains` is the one used in the current (Scala 2.12) version of `List`. +It looks different but it admits exactly the same applications as the `contains(x: Any)` definition we started with. +However, we can make it more useful (i.e. restrictive) by adding an `Eql` parameter: +```scala + def contains[U >: T](x: U) given Eql[T, U]: Boolean // (1) +``` +This version of `contains` is equality-safe! More precisely, given +`x: T`, `xs: List[T]` and `y: U`, then `xs.contains(y)` is type-correct if and only if +`x == y` is type-correct. + +Unfortunately, the crucial ability to "lift" equality type checking from simple equality and pattern matching to arbitrary user-defined operations gets lost if we restrict ourselves to an equality class with a single type parameter. 
Consider the following signature of `contains` with a hypothetical `Eql1[T]` type class: +```scala + def contains[U >: T](x: U) given Eql1[U]: Boolean // (2) +``` +This version could be applied just as widely as the original `contains(x: Any)` method, +since the `Eql1[Any]` fallback is always available! So we have gained nothing. What got lost in the transition to a single parameter type class was the original rule that `Eql[A, B]` is available only if neither `A` nor `B` have a reflexive `Eql` instance. That rule simply cannot be expressed if there is a single type parameter for `Eql`. + +The situation is different under `-language:strictEquality`. In that case, +the `Eql[Any, Any]` or `Eql1[Any]` instances would never be available, and the +single and two-parameter versions would indeed coincide for most practical purposes. + +But assuming `-language:strictEquality` immediately and everywhere poses migration problems which might well be unsurmountable. Consider again `contains`, which is in the standard library. Parameterizing it with the `Eql` type class as in (1) is an immediate win since it rules out non-sensical applications while still allowing all sensible ones. +So it can be done almost at any time, modulo binary compatibility concerns. +On the other hand, parameterizing `contains` with `Eql1` as in (2) would make `contains` +unusable for all types that have not yet declared an `Eql1` instance, including all +types coming from Java. This is clearly unacceptable. It would lead to a situation where, +rather than migrating existing libraries to use safe equality, the only upgrade path is to have parallel libraries, with the new version only catering to types deriving `Eql1` and the old version dealing with everything else. Such a split of the ecosystem would be very problematic, which means the cure is likely to be worse than the disease. 
+ +For these reasons, it looks like a two-parameter type class is the only way forward because it can take the existing ecosystem where it is and migrate it towards a future where more and more code uses safe equality. + +In applications where `-language:strictEquality` is the default one could also introduce a one-parameter type alias such as +```scala +type Eq[-T] = Eql[T, T] +``` +Operations needing safe equality could then use this alias instead of the two-parameter `Eql` class. But it would only +work under `-language:strictEquality`, since otherwise the universal `Eq[Any]` instance would be available everywhere. + + +More on multiversal equality is found in a [blog post](http://www.scala-lang.org/blog/2016/05/06/multiversal-equality.html) +and a [Github issue](https://github.com/lampepfl/dotty/issues/1247). diff --git a/docs/docs/reference/contextual-instance/query-types-spec.md b/docs/docs/reference/contextual-instance/query-types-spec.md new file mode 100644 index 000000000000..67c627ce79f4 --- /dev/null +++ b/docs/docs/reference/contextual-instance/query-types-spec.md @@ -0,0 +1,79 @@ +--- +layout: doc-page +title: "Context Query Types - More Details" +--- + +## Syntax + + Type ::= ... + | `given' FunArgTypes `=>' Type + Expr ::= ... + | `given' FunParams `=>' Expr + +Context query types associate to the right, e.g. +`given S => given T => U` is the same as `given S => (given T => U)`. + +## Implementation + +Context query types are shorthands for class types that define `apply` +methods with inferable parameters. Specifically, the `N`-ary function type +`T1, ..., TN => R` is a shorthand for the class type +`ImplicitFunctionN[T1 , ... , TN, R]`. Such class types are assumed to have the following definitions, for any value of `N >= 1`: +```scala +package scala +trait ImplicitFunctionN[-T1 , ... , -TN, +R] { + def apply given (x1: T1 , ... 
, xN: TN): R +} +``` +Context query types erase to normal function types, so these classes are +generated on the fly for typechecking, but not realized in actual code. + +Context query literals `given (x1: T1, ..., xn: Tn) => e` map +inferable parameters `xi` of types `Ti` to a result given by expression `e`. +The scope of each implicit parameter `xi` is `e`. The parameters must have pairwise distinct names. + +If the expected type of the query literal is of the form +`scala.ImplicitFunctionN[S1, ..., Sn, R]`, the expected type of `e` is `R` and +the type `Ti` of any of the parameters `xi` can be omitted, in which case `Ti += Si` is assumed. If the expected type of the query literal is +some other type, all inferable parameter types must be explicitly given, and the expected type of `e` is undefined. The type of the query literal is `scala.ImplicitFunctionN[S1, ...,Sn, T]`, where `T` is the widened +type of `e`. `T` must be equivalent to a type which does not refer to any of +the inferable parameters `xi`. + +The query literal is evaluated as the instance creation +expression: +```scala +new scala.ImplicitFunctionN[T1, ..., Tn, T] { + def apply given (x1: T1, ..., xn: Tn): T = e +} +``` +In the case of a single untyped parameter, `given (x) => e` can be +abbreviated to `given x => e`. + +An inferable parameter may also be a wildcard represented by an underscore `_`. In +that case, a fresh name for the parameter is chosen arbitrarily. + +Note: The closing paragraph of the +[Anonymous Functions section](https://www.scala-lang.org/files/archive/spec/2.12/06-expressions.html#anonymous-functions) +of Scala 2.12 is subsumed by query types and should be removed. + +Query literals `given (x1: T1, ..., xn: Tn) => e` are +automatically created for any expression `e` whose expected type is +`scala.ImplicitFunctionN[T1, ..., Tn, R]`, unless `e` is +itself a query literal. 
This is analogous to the automatic +insertion of `scala.Function0` around expressions in by-name argument position. + +Context query types generalize to `N > 22` in the same way that function types do, see [the corresponding +documentation](https://dotty.epfl.ch/docs/reference/dropped-features/limit22.html). + +## Examples + +See the section on Expressiveness from [Simplicitly: foundations and +applications of implicit function +types](https://dl.acm.org/citation.cfm?id=3158130). I've extracted it in [this +Gist](https://gist.github.com/OlivierBlanvillain/234d3927fe9e9c6fba074b53a7bd9592), it might be easier to access than the pdf. + +### Type Checking + +After desugaring no additional typing rules are required for context query types. diff --git a/docs/docs/reference/contextual-instance/query-types.md b/docs/docs/reference/contextual-instance/query-types.md new file mode 100644 index 000000000000..716a6b9ad10d --- /dev/null +++ b/docs/docs/reference/contextual-instance/query-types.md @@ -0,0 +1,160 @@ +--- +layout: doc-page +title: "Context Queries" +--- + +_Context queries_ are functions with (only) inferable parameters. +_Context query types_ are the types of first-class context queries. +Here is an example for a context query type: +```scala +type Contextual[T] = given Context => T +``` +A value of context query type is applied to inferred arguments, in +the same way a method with inferable parameters is applied. For instance: +```scala + instance ctx of Context = ... + + def f(x: Int): Contextual[Int] = ... + + f(2) given ctx // explicit argument + f(2) // argument is inferred +``` +Conversely, if the expected type of an expression `E` is a context query +type `given (T_1, ..., T_n) => U` and `E` is not already a +context query literal, `E` is converted to a context query literal by rewriting to +```scala + given (x_1: T1, ..., x_n: Tn) => E +``` +where the names `x_1`, ..., `x_n` are arbitrary. 
This expansion is performed +before the expression `E` is typechecked, which means that `x_1`, ..., `x_n` +are available as evidence in `E`. + +Like query types, query literals are written with a `given` prefix. They differ from normal function literals in two ways: + + 1. Their parameters are inferable. + 2. Their types are context query types. + +For example, continuing with the previous definitions, +```scala + def g(arg: Contextual[Int]) = ... + + g(22) // is expanded to g(given ctx => 22) + + g(f(2)) // is expanded to g(given ctx => f(2) given ctx) + + g(given ctx => f(22) given ctx) // is left as it is +``` +### Example: Builder Pattern + +Context query types have considerable expressive power. For +instance, here is how they can support the "builder pattern", where +the aim is to construct tables like this: +```scala + table { + row { + cell("top left") + cell("top right") + } + row { + cell("bottom left") + cell("bottom right") + } + } +``` +The idea is to define classes for `Table` and `Row` that allow +addition of elements via `add`: +```scala + class Table { + val rows = new ArrayBuffer[Row] + def add(r: Row): Unit = rows += r + override def toString = rows.mkString("Table(", ", ", ")") + } + + class Row { + val cells = new ArrayBuffer[Cell] + def add(c: Cell): Unit = cells += c + override def toString = cells.mkString("Row(", ", ", ")") + } + + case class Cell(elem: String) +``` +Then, the `table`, `row` and `cell` constructor methods can be defined +in terms of query types to avoid the plumbing boilerplate +that would otherwise be necessary. 
+ +```scala + def table(init: given Table => Unit) = { + instance t of Table + init + t + } + + def row(init: given Row => Unit) given (t: Table) = { + instance r of Row + init + t.add(r) + } + + def cell(str: String) given (r: Row) = + r.add(new Cell(str)) +``` +With that setup, the table construction code above compiles and expands to: +```scala + table { given $t: Table => + row { given $r: Row => + cell("top left") given $r + cell("top right") given $r + } given $t + row { given $r: Row => + cell("bottom left") given $r + cell("bottom right") given $r + } given $t + } +``` +### Example: Postconditions + +As a larger example, here is a way to define constructs for checking arbitrary postconditions using an extension method `ensuring` so that the checked result can be referred to simply by `result`. The example combines opaque aliases, context query types, and extension methods to provide a zero-overhead abstraction. + +```scala +object PostConditions { + opaque type WrappedResult[T] = T + + private object WrappedResult { + def wrap[T](x: T): WrappedResult[T] = x + def unwrap[T](x: WrappedResult[T]): T = x + } + + def result[T] given (r: WrappedResult[T]): T = WrappedResult.unwrap(r) + + def (x: T) ensuring [T](condition: given WrappedResult[T] => Boolean): T = { + instance of WrappedResult[T] = WrappedResult.wrap(x) + assert(condition) + x + } +} + +object Test { + import PostConditions.{ensuring, result} + val s = List(1, 2, 3).sum.ensuring(result == 6) +} +``` +**Explanations**: We use a context query type `given WrappedResult[T] => Boolean` +as the type of the condition of `ensuring`. An argument to `ensuring` such as +`(result == 6)` will therefore have evidence of type `WrappedResult[T]` in +scope to pass along to the `result` method. `WrappedResult` is a fresh type, to make sure +that we do not get unwanted evidence types in scope (this is good practice in all cases +where given clauses are involved). 
Since `WrappedResult` is an opaque type alias, its +values need not be boxed, and since `ensuring` is added as an extension method, its argument +does not need boxing either. Hence, the implementation of `ensuring` is about as efficient +as the best possible code one could write by hand: + + { val result = List(1, 2, 3).sum + assert(result == 6) + result + } + +### Reference + +For more info, see the [blog article](https://www.scala-lang.org/blog/2016/12/07/implicit-function-types.html), +(which uses a different syntax that has been superseded). + +[More details](./query-types-spec.html) diff --git a/docs/docs/reference/contextual-instance/relationship-implicits.md b/docs/docs/reference/contextual-instance/relationship-implicits.md new file mode 100644 index 000000000000..30f15eafd4a8 --- /dev/null +++ b/docs/docs/reference/contextual-instance/relationship-implicits.md @@ -0,0 +1,169 @@ +--- +layout: doc-page +title: Relationship with Scala 2 Implicits +--- + +Many, but not all, of the new contextual abstraction features in Scala 3 can be mapped to Scala 2's implicits. This page gives a rundown on the relationships between new and old features. + +## Simulating Contextual Abstraction with Implicits + +### Instance Definitions + +Instance definitions can be mapped to combinations of implicit objects, classes and implicit methods. + + 1. Instance definitions without parameters are mapped to implicit objects. E.g., + ```scala + instance IntOrd of Ord[Int] { ... } + ``` + maps to + ```scala + implicit object IntOrd extends Ord[Int] { ... } + ``` + 2. Parameterized instance definitions are mapped to combinations of classes and implicit methods. E.g., + ```scala + instance ListOrd[T] given (ord: Ord[T]) of Ord[List[T]] { ... } + ``` + maps to + ```scala + class ListOrd[T](implicit ord: Ord[T]) extends Ord[List[T]] { ... } + final implicit def ListOrd[T](implicit ord: Ord[T]): ListOrd[T] = new ListOrd[T] + ``` + 3. Instance aliases map to implicit methods. 
E.g., + ```scala + instance ctx of ExecutionContext = ... + ``` + maps to + ```scala + final implicit def ctx: ExecutionContext = ... + ``` + +### Anonymous Instance Definitions + +Anonymous instance values get compiler synthesized names, which are generated in a reproducible way from the implemented type(s). For +example, if the names of the `IntOrd` and `ListOrd` instances above were left out, the following names would be synthesized instead: +```scala + instance Ord_Int_ev of Ord[Int] { ... } + instance Ord_List_ev[T] of Ord[List[T]] { ... } +``` +The synthesized type names are formed from + + - the simple name(s) of the implemented type(s), leaving out any prefixes, + - the simple name(s) of the toplevel argument type constructors to these types + - the suffix `_ev`. + +Anonymous implicit instances that define extension methods without also implementing a type +get their name from the name of the first extension method and the toplevel type +constructor of its first parameter. For example, the instance +```scala + instance { + def (xs: List[T]) second[T] = ... + } +``` +gets the synthesized name `second_of_List_T_ev`. + +### Inferable Parameters + +The new inferable parameter syntax with `given` corresponds largely to Scala-2's implicit parameters. E.g. +```scala + def max[T](x: T, y: T) given (ord: Ord[T]): T +``` +would be written +```scala + def max[T](x: T, y: T)(implicit ord: Ord[T]): T +``` +in Scala 2. The main difference concerns applications of such parameters. +Explicit arguments to inferable parameters _must_ be written using `given`, +mirroring the definition syntax. E.g, `max(2, 3) given IntOrd`. +Scala 2 uses normal applications `max(2, 3)(IntOrd)` instead. The Scala 2 syntax has some inherent ambiguities and restrictions which are overcome by the new syntax. For instance, multiple implicit parameter lists are not available in the old syntax, even though they can be simulated using auxiliary objects in the "Aux" pattern. 
+ +The `the` method corresponds to `implicitly` in Scala 2. +It is precisely the same as the `the` method in Shapeless. +The difference between `the` (in both versions) and `implicitly` is +that `the` can return a more precise type than the type that was +asked for. + +### Context Bounds + +Context bounds are the same in both language versions. They expand to the respective forms of implicit parameters. + +**Note:** To ease migration, context bounds in Dotty map for a limited time to old-style implicit parameters for which arguments can be passed either with `given` or +with a normal application. Once old-style implicits are deprecated, context bounds +will map to inferable parameters instead. + +### Extension Methods + +Extension methods have no direct counterpart in Scala 2, but they can be simulated with implicit classes. For instance, the extension method +```scala + def (c: Circle) circumference: Double = c.radius * math.Pi * 2 +``` +could be simulated to some degree by +```scala + implicit class CircleDeco(c: Circle) extends AnyVal { + def circumference: Double = c.radius * math.Pi * 2 + } +``` +Extension methods in instance definitions have no direct counterpart in Scala-2. The only way to simulate these is to make implicit classes available through imports. The Simulacrum macro library can automate this process in some cases. + +### Typeclass Derivation + +Typeclass derivation has no direct counterpart in the Scala 2 language. Comparable functionality can be achieved by macro-based libraries such as Shapeless, Magnolia, or scalaz-deriving. + +### Context Query types + +Context Query types have no analogue in Scala 2. + +### Implicit By-Name Parameters + +Implicit by-name parameters are not supported in Scala 2, but can be emulated to some degree by the `Lazy` type in Shapeless. 
+ +## Simulating Scala 2 Implicits in Dotty + +### Implicit Conversions + +Implicit conversion methods in Scala 2 can be expressed as implicit instances of class +`scala.Conversion` in Dotty. E.g. instead of +```scala + implicit def stringToToken(str: String): Token = new Keyword(str) +``` +one can write +```scala + instance stringToToken of Conversion[String, Token] { + def apply(str: String): Token = new Keyword(str) + } +``` + +### Implicit Classes + +Implicit classes in Scala 2 are often used to define extension methods, which are directly supported in Dotty. Other uses of implicit classes can be simulated by a pair of a regular class and a `Conversion` instance definition. + + +### Implicit Values + +Implicit `val` definitions in Scala 2 can be expressed in Dotty using a regular `val` definition and an instance alias. E.g., Scala 2's +```scala + lazy implicit val pos: Position = tree.sourcePos +``` +can be expressed in Dotty as +```scala + lazy val pos: Position = tree.sourcePos + instance of Position = pos +``` + +### Abstract Implicits + +An abstract implicit `val` or `def` in Scala 2 can be expressed in Dotty using a regular abstract definition and an instance alias. E.g., Scala 2's +```scala + implicit def symDeco: SymDeco +``` +can be expressed in Dotty as +```scala + def symDeco: SymDeco + instance of SymDeco = symDeco +``` + +## Implementation Status and Timeline + +The Dotty implementation implements both Scala-2's implicits and the new abstractions. In fact, support for Scala-2's implicits is an essential part of the common language subset between 2.13/2.14 and Dotty. +Migration to the new abstractions will be supported by making automatic rewritings available. + +Depending on adoption patterns, old style implicits might start to be deprecated in a version following Scala 3.0. 
diff --git a/docs/docs/reference/contextual-instance/typeclasses.md b/docs/docs/reference/contextual-instance/typeclasses.md new file mode 100644 index 000000000000..f14fc0178504 --- /dev/null +++ b/docs/docs/reference/contextual-instance/typeclasses.md @@ -0,0 +1,64 @@ +--- +layout: doc-page +title: "Implementing Typeclasses" +--- + +Instance definitions, extension methods and context bounds +allow a concise and natural expression of _typeclasses_. Typeclasses are just traits +with canonical implementations defined by instance definitions. Here are some examples of standard typeclasses: + +### Semigroups and monoids: + +```scala +trait SemiGroup[T] { + def (x: T) combine (y: T): T +} +trait Monoid[T] extends SemiGroup[T] { + def unit: T +} +object Monoid { + def apply[T] given Monoid[T] = the[Monoid[T]] +} + +instance of Monoid[String] { + def (x: String) combine (y: String): String = x.concat(y) + def unit: String = "" +} + +instance of Monoid[Int] { + def (x: Int) combine (y: Int): Int = x + y + def unit: Int = 0 +} + +def sum[T: Monoid](xs: List[T]): T = + xs.foldLeft(Monoid[T].unit)(_.combine(_)) +``` + +### Functors and monads: + +```scala +trait Functor[F[_]] { + def (x: F[A]) map [A, B] (f: A => B): F[B] +} + +trait Monad[F[_]] extends Functor[F] { + def (x: F[A]) flatMap [A, B] (f: A => F[B]): F[B] + def (x: F[A]) map [A, B] (f: A => B) = x.flatMap(f `andThen` pure) + + def pure[A](x: A): F[A] +} + +instance ListMonad of Monad[List] { + def (xs: List[A]) flatMap [A, B] (f: A => List[B]): List[B] = + xs.flatMap(f) + def pure[A](x: A): List[A] = + List(x) +} + +instance ReaderMonad[Ctx] of Monad[[X] => Ctx => X] { + def (r: Ctx => A) flatMap [A, B] (f: A => Ctx => B): Ctx => B = + ctx => f(r(ctx))(ctx) + def pure[A](x: A): Ctx => A = + ctx => x +} +```