Diffstat (limited to 'stdlib/source')
-rw-r--r--  stdlib/source/lux/compiler/default/syntax.lux  81
-rw-r--r--  stdlib/source/lux/data/text/lexer.lux           64
2 files changed, 59 insertions(+), 86 deletions(-)
diff --git a/stdlib/source/lux/compiler/default/syntax.lux b/stdlib/source/lux/compiler/default/syntax.lux
index ca715e8dd..50c02c11d 100644
--- a/stdlib/source/lux/compiler/default/syntax.lux
+++ b/stdlib/source/lux/compiler/default/syntax.lux
@@ -50,10 +50,17 @@
(def: digits+ (format "_" ..digits))
(def: white-space Text "\t\v \r\f")
-(def: new-line Text "\n")
+(def: new-line "\n")
(def: new-line^ (l.this new-line))
-(def: text-delimiter^ (l.this "\""))
+(def: text-delimiter "\"")
+(def: text-delimiter^ (l.this text-delimiter))
+
+(def: escape "\\")
+
+(def: sigil "#")
+
+(def: single-line-comment-marker (format ..sigil ..sigil))
## This is the parser for white-space.
## Whenever a new-line is encountered, the column gets reset to 0, and
@@ -78,7 +85,7 @@
(def: (single-line-comment^ where)
(-> Cursor (Lexer Cursor))
(do p.Monad<Parser>
- [_ (l.this "##")
+ [_ (l.this ..single-line-comment-marker)
_ (l.some! (l.none-of! new-line))
_ ..new-line^]
(wrap (|> where
@@ -87,8 +94,8 @@
## This is just a helper parser to find text which doesn't run into
## any special character sequences for multi-line comments.
-(def: multi-line-comment-start^ (l.this "#("))
-(def: multi-line-comment-end^ (l.this ")#"))
+(def: multi-line-comment-start^ (l.this (format ..sigil "(")))
+(def: multi-line-comment-end^ (l.this (format ")" ..sigil)))
(def: multi-line-comment-bound^
(Lexer Any)
@@ -168,19 +175,19 @@
## Escaped characters may show up in Char and Text literals.
(def: escaped-char^
(Lexer [Nat Text])
- (p.after (l.this "\\")
+ (p.after (l.this ..escape)
(do p.Monad<Parser>
[code l.any]
(case code
## Handle special cases.
- "t" (wrap [2 "\t"])
- "v" (wrap [2 "\v"])
- "b" (wrap [2 "\b"])
- "n" (wrap [2 "\n"])
- "r" (wrap [2 "\r"])
- "f" (wrap [2 "\f"])
- "\"" (wrap [2 "\""])
- "\\" (wrap [2 "\\"])
+ "t" (wrap [2 "\t"])
+ "v" (wrap [2 "\v"])
+ "b" (wrap [2 "\b"])
+ "n" (wrap [2 ..new-line])
+ "r" (wrap [2 "\r"])
+ "f" (wrap [2 "\f"])
+ (^ (static ..text-delimiter)) (wrap [2 ..text-delimiter])
+ (^ (static ..escape)) (wrap [2 ..escape])
## Handle unicode escapes.
"u"
@@ -246,7 +253,7 @@
(wrap [(update@ #.column (n/+ (text.size chunk)) where)
[where (#.Nat value)]]))))
-(def: (normal-frac where)
+(def: #export (frac where)
(-> Cursor (Lexer [Cursor Code]))
(do p.Monad<Parser>
[chunk ($_ l.and
@@ -267,40 +274,6 @@
(wrap [(update@ #.column (n/+ (text.size chunk)) where)
[where (#.Frac value)]]))))
-(def: frac-ratio-fragment
- (Lexer Frac)
- (<| (p.codec number.Codec<Text,Frac>)
- (parser/map (function (_ digits)
- (format digits ".0")))
- rich-digits^))
-
-(def: (ratio-frac where)
- (-> Cursor (Lexer [Cursor Code]))
- (do p.Monad<Parser>
- [chunk ($_ l.and
- (p.default "" (l.one-of "-"))
- rich-digits^
- (l.one-of "/")
- rich-digits^)
- value (l.local chunk
- (do @
- [signed? (l.this? "-")
- numerator frac-ratio-fragment
- _ (l.this? "/")
- denominator frac-ratio-fragment
- _ (p.assert "Denominator cannot be 0."
- (not (f/= +0.0 denominator)))]
- (wrap (|> numerator
- (f/* (if signed? -1.0 +1.0))
- (f// denominator)))))]
- (wrap [(update@ #.column (n/+ (text.size chunk)) where)
- [where (#.Frac value)]])))
-
-(def: #export (frac where)
- (-> Cursor (Lexer [Cursor Code]))
- (p.either (normal-frac where)
- (ratio-frac where)))
-
## This parser looks so complex because text in Lux can be multi-line
## and there are rules regarding how this is handled.
(def: #export (text where)
@@ -342,7 +315,7 @@
($_ p.either
## Normal text characters.
(do @
- [normal (l.slice (l.many! (l.none-of! "\\\"\n")))]
+ [normal (l.slice (l.many! (l.none-of! (format ..escape ..text-delimiter ..new-line))))]
(recur (format text-read normal)
(update@ #.column (n/+ (text.size normal)) where)
#0))
@@ -463,13 +436,13 @@
## a digit, to avoid confusion with regards to numbers.
(def: name-part^
(Lexer Text)
- (let [delimiters (format "()[]{}#\"" name-separator)
+ (let [delimiters (format "()[]{}" ..sigil ..text-delimiter ..name-separator)
space (format white-space new-line)
head (l.none-of! (format ..digits delimiters space))
tail (l.some! (l.none-of! (format delimiters space)))]
(l.slice (l.and! head tail))))
-(def: current-module-mark Text (format name-separator name-separator))
+(def: current-module-mark Text (format ..name-separator ..name-separator))
(def: (name^ current-module aliases)
(-> Text Aliases (Lexer [Name Nat]))
@@ -526,8 +499,8 @@
(wrap [(update@ #.column (n/+ <length>) where)
[where (<tag> value)]])))]
- [tag (p.after (l.this "#")) #.Tag ("lux i64 +" 1 length)]
- [identifier (|>) #.Identifier length]
+ [tag (p.after (l.this ..sigil)) #.Tag ("lux i64 +" 1 length)]
+ [identifier (|>) #.Identifier length]
)
(do-template [<name> <value>]
diff --git a/stdlib/source/lux/data/text/lexer.lux b/stdlib/source/lux/data/text/lexer.lux
index 80e2cea0f..677810eb8 100644
--- a/stdlib/source/lux/data/text/lexer.lux
+++ b/stdlib/source/lux/data/text/lexer.lux
@@ -4,14 +4,14 @@
[monad (#+ do Monad)]
["p" parser]]
[data
- ["." text ("text/." Monoid<Text>)]
["." product]
["." maybe]
["e" error]
[collection
["." list ("list/." Fold<List>)]]]
[macro
- ["." code]]])
+ ["." code]]]
+ ["." // ("text/." Monoid<Text>)])
(type: Offset Nat)
@@ -26,7 +26,7 @@
(def: (remaining offset tape)
(-> Offset Text Text)
- (|> tape (text.split offset) maybe.assume product.right))
+ (|> tape (//.split offset) maybe.assume product.right))
(def: cannot-lex-error Text "Cannot lex from empty text.")
@@ -41,7 +41,7 @@
(#e.Error msg)
(#e.Success [[end-offset _] output])
- (if (n/= end-offset (text.size input))
+ (if (n/= end-offset (//.size input))
(#e.Success output)
(#e.Error (unconsumed-input-error end-offset input)))
))
@@ -67,9 +67,9 @@
{#.doc "Just returns the next character without applying any logic."}
(Lexer Text)
(function (_ [offset tape])
- (case (text.nth offset tape)
+ (case (//.nth offset tape)
(#.Some output)
- (#e.Success [[(inc offset) tape] (text.from-code output)])
+ (#e.Success [[(inc offset) tape] (//.from-code output)])
_
(#e.Error cannot-lex-error))))
@@ -102,22 +102,22 @@
{#.doc "Lex a text if it matches the given sample."}
(-> Text (Lexer Any))
(function (_ [offset tape])
- (case (text.index-of' reference offset tape)
+ (case (//.index-of' reference offset tape)
(#.Some where)
(if (n/= offset where)
- (#e.Success [[(n/+ (text.size reference) offset) tape] []])
- (#e.Error ($_ text/compose "Could not match: " (text.encode reference) " @ " (maybe.assume (text.clip' offset tape)))))
+ (#e.Success [[(n/+ (//.size reference) offset) tape] []])
+ (#e.Error ($_ text/compose "Could not match: " (//.encode reference) " @ " (maybe.assume (//.clip' offset tape)))))
_
- (#e.Error ($_ text/compose "Could not match: " (text.encode reference))))))
+ (#e.Error ($_ text/compose "Could not match: " (//.encode reference))))))
(def: #export (this? reference)
{#.doc "Lex a text if it matches the given sample."}
(-> Text (Lexer Bit))
(function (_ (^@ input [offset tape]))
- (case (text.index-of' reference offset tape)
+ (case (//.index-of' reference offset tape)
(^multi (#.Some where) (n/= offset where))
- (#e.Success [[(n/+ (text.size reference) offset) tape] #1])
+ (#e.Success [[(n/+ (//.size reference) offset) tape] #1])
_
(#e.Success [input #0]))))
@@ -126,7 +126,7 @@
{#.doc "Ensure the lexer's input is empty."}
(Lexer Any)
(function (_ (^@ input [offset tape]))
- (if (n/= offset (text.size tape))
+ (if (n/= offset (//.size tape))
(#e.Success [input []])
(#e.Error (unconsumed-input-error offset tape)))))
@@ -134,15 +134,15 @@
{#.doc "Ask if the lexer's input is empty."}
(Lexer Bit)
(function (_ (^@ input [offset tape]))
- (#e.Success [input (n/= offset (text.size tape))])))
+ (#e.Success [input (n/= offset (//.size tape))])))
(def: #export peek
{#.doc "Lex the next character (without consuming it from the input)."}
(Lexer Text)
(function (_ (^@ input [offset tape]))
- (case (text.nth offset tape)
+ (case (//.nth offset tape)
(#.Some output)
- (#e.Success [input (text.from-code output)])
+ (#e.Success [input (//.from-code output)])
_
(#e.Error cannot-lex-error))))
@@ -158,8 +158,8 @@
(-> Nat Nat (Lexer Text))
(do p.Monad<Parser>
[char any
- #let [char' (maybe.assume (text.nth 0 char))]
- _ (p.assert ($_ text/compose "Character is not within range: " (text.from-code bottom) "-" (text.from-code top))
+ #let [char' (maybe.assume (//.nth 0 char))]
+ _ (p.assert ($_ text/compose "Character is not within range: " (//.from-code bottom) "-" (//.from-code top))
(.and (n/>= bottom char')
(n/<= top char')))]
(wrap char)))
@@ -199,10 +199,10 @@
{#.doc (code.text ($_ text/compose "Only lex characters that are" <description-modifier> " part of a piece of text."))}
(-> Text (Lexer Text))
(function (_ [offset tape])
- (case (text.nth offset tape)
+ (case (//.nth offset tape)
(#.Some output)
- (let [output (text.from-code output)]
- (if (<modifier> (text.contains? output options))
+ (let [output (//.from-code output)]
+ (if (<modifier> (//.contains? output options))
(#e.Success [[(inc offset) tape] output])
(#e.Error ($_ text/compose "Character (" output
") is should " <description-modifier>
@@ -220,10 +220,10 @@
{#.doc (code.text ($_ text/compose "Only lex characters that are" <description-modifier> " part of a piece of text."))}
(-> Text (Lexer Slice))
(function (_ [offset tape])
- (case (text.nth offset tape)
+ (case (//.nth offset tape)
(#.Some output)
- (let [output (text.from-code output)]
- (if (<modifier> (text.contains? output options))
+ (let [output (//.from-code output)]
+ (if (<modifier> (//.contains? output options))
(#e.Success [[(inc offset) tape]
{#basis offset
#distance 1}])
@@ -242,11 +242,11 @@
{#.doc "Only lex characters that satisfy a predicate."}
(-> (-> Nat Bit) (Lexer Text))
(function (_ [offset tape])
- (case (text.nth offset tape)
+ (case (//.nth offset tape)
(#.Some output)
(if (p output)
- (#e.Success [[(inc offset) tape] (text.from-code output)])
- (#e.Error ($_ text/compose "Character does not satisfy predicate: " (text.from-code output))))
+ (#e.Success [[(inc offset) tape] (//.from-code output)])
+ (#e.Error ($_ text/compose "Character does not satisfy predicate: " (//.from-code output))))
_
(#e.Error cannot-lex-error))))
@@ -254,7 +254,7 @@
(def: #export space
{#.doc "Only lex white-space."}
(Lexer Text)
- (satisfies text.space?))
+ (satisfies //.space?))
(def: #export (and left right)
(-> (Lexer Text) (Lexer Text) (Lexer Text))
@@ -274,7 +274,7 @@
[(def: #export (<name> lexer)
{#.doc (code.text ($_ text/compose "Lex " <doc-modifier> " characters as a single continuous text."))}
(-> (Lexer Text) (Lexer Text))
- (|> lexer <base> (:: p.Monad<Parser> map text.concat)))]
+ (|> lexer <base> (:: p.Monad<Parser> map //.concat)))]
[some p.some "some"]
[many p.many "many"]
@@ -294,7 +294,7 @@
[(def: #export (<name> amount lexer)
{#.doc (code.text ($_ text/compose "Lex " <doc-modifier> " N characters."))}
(-> Nat (Lexer Text) (Lexer Text))
- (|> lexer (<base> amount) (:: p.Monad<Parser> map text.concat)))]
+ (|> lexer (<base> amount) (:: p.Monad<Parser> map //.concat)))]
[exactly p.exactly "exactly"]
[at-most p.at-most "at most"]
@@ -315,7 +315,7 @@
(def: #export (between from to lexer)
{#.doc "Lex between N and M characters."}
(-> Nat Nat (Lexer Text) (Lexer Text))
- (|> lexer (p.between from to) (:: p.Monad<Parser> map text.concat)))
+ (|> lexer (p.between from to) (:: p.Monad<Parser> map //.concat)))
(def: #export (between! from to lexer)
{#.doc "Lex between N and M characters."}
@@ -344,7 +344,7 @@
(do p.Monad<Parser>
[[basis distance] lexer]
(function (_ (^@ input [offset tape]))
- (case (text.clip basis (n/+ basis distance) tape)
+ (case (//.clip basis (n/+ basis distance) tape)
(#.Some output)
(#e.Success [input output])