diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
index 08e7ec3..8f6fdce 100644
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -46,4 +46,20 @@ jobs:
       - uses: codecov/codecov-action@v1
         with:
           file: lcov.info
-
+  docs:
+    name: 'Documentation'
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: julia-actions/setup-julia@v1
+        with:
+          version: '1'
+      - run: |
+          julia --project=docs -e '
+            using Pkg
+            Pkg.develop(PackageSpec(path=pwd()))
+            Pkg.instantiate()'
+      - run: julia --project=docs --color=yes docs/make.jl
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }}
diff --git a/README.md b/README.md
index f26f748..847554a 100644
--- a/README.md
+++ b/README.md
@@ -14,8 +14,8 @@ The goals of this package is to be
 
 #### Tokenization
 
-The function `tokenize` is the main entrypoint for generating `Token`s.
-It takes a string or a buffer and creates an iterator that will sequentially return the next `Token` until the end of string or buffer. The argument to `tokenize` can either be a `String`, `IOBuffer` or an `IOStream`.
+The function `tokenize` is the main entrypoint for generating [`Token`](@ref)s.
+It takes a string or a buffer and creates an iterator that will sequentially return the next [`Token`](@ref) until the end of the string or buffer. The argument to `tokenize` can be a `String`, an `IOBuffer`, or an `IOStream`.
 
 ```jl
 julia> collect(tokenize("function f(x) end"))
@@ -30,11 +30,11 @@ julia> collect(tokenize("function f(x) end"))
  1,18-1,17        ENDMARKER      ""
 ```
 
-#### `Token`s
+#### [`Token`](@ref)s
 
-Each `Token` is represented by where it starts and ends, what string it contains and what type it is.
+Each [`Token`](@ref) is represented by where it starts and ends, what string it contains, and what kind it is.
 
-The API for a `Token` (non exported from the `Tokenize.Tokens` module) is.
+The API for a [`Token`](@ref) (not exported from the `Tokenize.Tokens` module) is:
 
 ```julia
 startpos(t)::Tuple{Int, Int} # row and column where the token start
diff --git a/docs/.gitignore b/docs/.gitignore
new file mode 100644
index 0000000..a303fff
--- /dev/null
+++ b/docs/.gitignore
@@ -0,0 +1,2 @@
+build/
+site/
diff --git a/docs/Project.toml b/docs/Project.toml
new file mode 100644
index 0000000..dfa65cd
--- /dev/null
+++ b/docs/Project.toml
@@ -0,0 +1,2 @@
+[deps]
+Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
diff --git a/docs/make.jl b/docs/make.jl
new file mode 100644
index 0000000..4dc5c8e
--- /dev/null
+++ b/docs/make.jl
@@ -0,0 +1,17 @@
+using Documenter
+using Tokenize
+
+makedocs(
+    sitename = "Tokenize",
+    format = Documenter.HTML(),
+    modules = [Tokenize],
+    pages = [
+        "Home" => "index.md",
+    ],
+)
+
+deploydocs(
+    repo = "github.com/JuliaLang/Tokenize.jl.git",
+    devbranch = "master",
+    push_preview = true,
+)
diff --git a/docs/src/index.md b/docs/src/index.md
new file mode 100644
index 0000000..23d6bb1
--- /dev/null
+++ b/docs/src/index.md
@@ -0,0 +1,20 @@
+# Tokenize.jl
+
+Documentation for Tokenize.jl
+
+```@autodocs
+Modules = [
+    Tokenize,
+    Tokenize.Lexers,
+]
+```
+
+```@meta
+CurrentModule = Tokenize.Tokens
+```
+
+```@autodocs
+Modules = [
+    Tokens,
+]
+```
\ No newline at end of file
diff --git a/src/lexer.jl b/src/lexer.jl
index b9600ab..dc31e00 100644
--- a/src/lexer.jl
+++ b/src/lexer.jl
@@ -66,7 +66,7 @@ Lexer(str::AbstractString, T::Type{TT} = Token) where TT <: AbstractToken = Lexe
 
 Returns an `Iterable` containing the tokenized input. Can be reverted by e.g.
 `join(untokenize.(tokenize(x)))`. Setting `T` chooses the type of token
-produced by the lexer (`Token` or `RawToken`).
+produced by the lexer ([`Token`](@ref) or [`RawToken`](@ref)).
 """
 tokenize(x, ::Type{Token}) = Lexer(x, Token)
 tokenize(x, ::Type{RawToken}) = Lexer(x, RawToken)
@@ -104,7 +104,7 @@ end
 """
     startpos(l::Lexer)
 
-Return the latest `Token`'s starting position.
+Return the latest [`Token`](@ref)'s starting position.
 """
 startpos(l::Lexer) = l.token_startpos
@@ -120,7 +120,7 @@ Base.seekstart(l::Lexer) = seek(l.io, l.io_startpos)
 """
     seek2startpos!(l::Lexer)
 
-Sets the lexer's current position to the beginning of the latest `Token`.
+Sets the lexer's current position to the beginning of the latest [`Token`](@ref).
 """
 seek2startpos!(l::Lexer) = seek(l, startpos(l))
@@ -157,7 +157,7 @@ Base.seek(l::Lexer, pos) = seek(l.io, pos)
 """
     start_token!(l::Lexer)
 
-Updates the lexer's state such that the next `Token` will start at the current
+Updates the lexer's state such that the next [`Token`](@ref) will start at the current
 position.
 """
 function start_token!(l::Lexer)
@@ -241,7 +241,7 @@ end
 """
     emit(l::Lexer, kind::Kind, err::TokenError=Tokens.NO_ERR)
 
-Returns a `Token` of kind `kind` with contents `str` and starts a new `Token`.
+Returns a [`Token`](@ref) of kind `kind` with contents read from the current token range and starts a new [`Token`](@ref).
 """
 function emit(l::Lexer{IO_t,Token}, kind::Kind, err::TokenError = Tokens.NO_ERR) where IO_t
     suffix = false
@@ -290,7 +290,7 @@ end
 """
     emit_error(l::Lexer, err::TokenError=Tokens.UNKNOWN)
 
-Returns an `ERROR` token with error `err` and starts a new `Token`.
+Returns an `ERROR` token with error `err` and starts a new [`Token`](@ref).
 """
 function emit_error(l::Lexer, err::TokenError = Tokens.UNKNOWN)
     return emit(l, Tokens.ERROR, err)
 end
@@ -300,7 +300,7 @@ end
 """
     next_token(l::Lexer)
 
-Returns the next `Token`.
+Returns the next [`Token`](@ref).
 """
 function next_token(l::Lexer, start = true)
     start && start_token!(l)
diff --git a/src/token.jl b/src/token.jl
index c9f5051..13d9335 100644
--- a/src/token.jl
+++ b/src/token.jl
@@ -48,6 +48,9 @@ TOKEN_ERROR_DESCRIPTION = Dict{TokenError, String}(
 
 abstract type AbstractToken end
 
+"""
+Each [`Token`](@ref) is represented by where it starts and ends, what string it contains, and what kind it is.
+"""
 struct Token <: AbstractToken
     kind::Kind
     # Offsets into a string or buffer
@@ -66,6 +69,9 @@ Token(kind, startposition, endposition, startbyte, endbyte, val, NO_ERR, false,
 end
 Token() = Token(ERROR, (0,0), (0,0), 0, 0, "", UNKNOWN, false, false)
 
+"""
+Like [`Token`](@ref), but without the `val` field.
+"""
struct RawToken <: AbstractToken
    kind::Kind
    # Offsets into a string or buffer
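
The `tokenize` docstring touched above claims the token stream can be reverted back to the input. A minimal sketch of that round-trip property, assuming the package is installed (`tokenize` and `untokenize` are the exported entry points; `kind` lives in the non-exported `Tokenize.Tokens` module, per the README):

```julia
using Tokenize
using Tokenize: Tokens

str = "function f(x) end"
toks = collect(tokenize(str))

# Reassembling each token's source text reproduces the input exactly,
# whitespace included.
@assert join(untokenize.(toks)) == str

# The iterator always terminates with an ENDMARKER token.
@assert Tokens.kind(last(toks)) == Tokens.ENDMARKER
```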
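The new `RawToken` docstring is terse, so a sketch of the practical difference may help. This assumes the existing `untokenize(::RawToken, ::String)` method: since a `RawToken` carries no `val`, the source string has to be supplied to recover its text.

```julia
using Tokenize
using Tokenize: Tokens

str = "x + 1"

tok = first(tokenize(str))                    # Token: stores its text in `val`
raw = first(tokenize(str, Tokens.RawToken))   # RawToken: positions/offsets only

@assert untokenize(tok) == "x"
@assert untokenize(raw, str) == "x"  # must pass the source to recover the text
```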
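To reproduce the new `docs` CI job locally, its two `run` steps translate directly to a shell session from the repository root (these are the workflow's own commands, nothing new):

```sh
# Dev the package into the docs environment and resolve dependencies,
# as the job's first `run` step does.
julia --project=docs -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()'

# Build the docs; output lands in docs/build/ (ignored by the new docs/.gitignore).
julia --project=docs --color=yes docs/make.jl
```

Without `DOCUMENTER_KEY` and the CI environment variables, `deploydocs` should detect it is not running on CI and skip deployment, leaving only the locally built site.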