# Copyright (C) 2017 The Meme Factory, Inc.  http://www.meme.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Karl O. Pinc <kop@meme.com>

import pytest
from enforcer import exceptions as ex
from enforcer import rules_lex

# Global lexer; each test clones it so it starts with fresh state
lexer = rules_lex.lexer


# Helper functions

def get_type_val_list(lex):
    '''Get a list of (type, value) tuples from the lexer'''
    return [(tok.type, tok.value) for tok in lex]
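
# Example usage (a sketch; assumes the two leading words of the
# test_double_paren_str input below lex the same way on their own):
#
#     lex = lexer.clone()
#     lex.input('two words')
#     get_type_val_list(lex)
#     # -> [('WORD', 'two'), ('SPACE', ' '), ('WORD', 'words')]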


# Tests

def test_paren_str():
    '''Do we get a paren_str token?'''
    lex = lexer.clone()
    lex.input('A regular string (parenthesized content)')
    assert (get_type_val_list(lex) ==
            [('WORD', 'A'),
             ('SPACE', ' '),
             ('WORD', 'regular'),
             ('SPACE', ' '),
             ('WORD', 'string'),
             ('PAREN_STR', ' (parenthesized content)')])


def test_double_paren_str():
    '''Do we get 2 paren_str tokens?'''
    lex = lexer.clone()
    lex.input('two words (some note) (another note)')
    assert (get_type_val_list(lex) ==
            [('WORD', 'two'),
             ('SPACE', ' '),
             ('WORD', 'words'),
             ('PAREN_STR', ' (some note)'),
             ('PAREN_STR', ' (another note)')])


def test_user_str():
    '''Do we get a user string and dashes?'''
    lex = lexer.clone()
    lex.input('A regular string - <user string>')
    assert (get_type_val_list(lex) ==
            [('WORD', 'A'),
             ('SPACE', ' '),
             ('WORD', 'regular'),
             ('SPACE', ' '),
             ('WORD', 'string'),
             ('DASH', ' - '),
             ('USER_STR', '<user string>')])


def test_version():
    '''Do we get a version number?'''
    lex = lexer.clone()
    lex.input('A regular string v<#>')
    assert (get_type_val_list(lex) ==
            [('WORD', 'A'),
             ('SPACE', ' '),
             ('WORD', 'regular'),
             ('SPACE', ' '),
             ('WORD', 'string'),
             ('VERSION', ' v<#>')])


def test_bad_input():
    '''Raise an exception when input is bad'''
    lex = lexer.clone()
    lex.input('A regular string <>')
    with pytest.raises(ex.LexError):
        list(lex)


def test_numbered_list():
    '''A numbered list gets the expected tokens'''
    lex = lexer.clone()
    lex.input('<#>. A regular string')
    assert (get_type_val_list(lex) ==
            [('HASH', '<#>'),
             ('PERIOD', '.'),
             ('SPACE', ' '),
             ('WORD', 'A'),
             ('SPACE', ' '),
             ('WORD', 'regular'),
             ('SPACE', ' '),
             ('WORD', 'string')])


def test_date():
    '''A date gets the expected tokens'''
    lex = lexer.clone()
    lex.input('A regular string - <yyyy-mm-dd>')
    assert (get_type_val_list(lex) ==
            [('WORD', 'A'),
             ('SPACE', ' '),
             ('WORD', 'regular'),
             ('SPACE', ' '),
             ('WORD', 'string'),
             ('DASH', ' - '),
             ('DATE', '<yyyy-mm-dd>')])


def test_words_w_periods():
    '''Words can have periods in them'''
    lex = lexer.clone()
    lex.input('word. .Word word.word')
    assert (get_type_val_list(lex) ==
            [('WORD', 'word.'),
             ('SPACE', ' '),
             ('WORD', '.Word'),
             ('SPACE', ' '),
             ('WORD', 'word.word')])


def test_periods():
    '''Periods are lexical tokens'''
    lex = lexer.clone()
    lex.input('<something>. . .<something>')
    assert (get_type_val_list(lex) ==
            [('USER_STR', '<something>'),
             ('PERIOD', '.'),
             ('SPACE', ' '),
             ('PERIOD', '.'),
             ('SPACE', ' '),
             ('PERIOD', '.'),
             ('USER_STR', '<something>')])


def test_braces():
    '''Square braces should return the right tokens'''
    lex = lexer.clone()
    lex.input('A regular string[ - <entity>]')
    assert (get_type_val_list(lex) ==
            [('WORD', 'A'),
             ('SPACE', ' '),
             ('WORD', 'regular'),
             ('SPACE', ' '),
             ('WORD', 'string'),
             ('LBRACE', '['),
             ('DASH', ' - '),
             ('ENTITY', '<entity>'),
             ('RBRACE', ']')])
