Subject: [Boost-commit] svn:boost r52646 - trunk/libs/spirit/example/lex
From: hartmut.kaiser_at_[hidden]
Date: 2009-04-27 22:19:29
Author: hkaiser
Date: 2009-04-27 22:19:27 EDT (Mon, 27 Apr 2009)
New Revision: 52646
URL: http://svn.boost.org/trac/boost/changeset/52646
Log:
Spirit: Modified some of the lexer example comments
Text files modified: 
   trunk/libs/spirit/example/lex/example4.cpp           |    20 ++++++++++----------                    
   trunk/libs/spirit/example/lex/example5.cpp           |    16 ++++++++--------                        
   trunk/libs/spirit/example/lex/example6.cpp           |    16 ++++++++--------                        
   trunk/libs/spirit/example/lex/print_numbers.cpp      |     2 +-                                      
   trunk/libs/spirit/example/lex/word_count.cpp         |     2 +-                                      
   trunk/libs/spirit/example/lex/word_count_functor.cpp |     4 ++--                                    
   trunk/libs/spirit/example/lex/word_count_lexer.cpp   |     4 ++--                                    
   7 files changed, 32 insertions(+), 32 deletions(-)
Modified: trunk/libs/spirit/example/lex/example4.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/example4.cpp	(original)
+++ trunk/libs/spirit/example/lex/example4.cpp	2009-04-27 22:19:27 EDT (Mon, 27 Apr 2009)
@@ -5,12 +5,12 @@
 
 //  This example shows how to create a simple lexer recognizing a couple of 
 //  different tokens aimed at a simple language and how to use this lexer with 
-//  a grammar. It shows how to associate values to tokens and how to access the 
-//  token values from inside the grammar.
+//  a grammar. It shows how to associate attributes to tokens and how to access 
+//  the token attributes from inside the grammar.
 //
-//  We use explicit token value types, making the corresponding token instances
+//  We use explicit token attribute types, making the corresponding token instances
 //  convert the matched input into an instance of that type. The token 
-//  value is exposed as the parser attribute if this token is used as a 
+//  attribute is exposed as the parser attribute if this token is used as a 
 //  parser component somewhere in a grammar.
 //
 //  Additionally, this example demonstrates, how to define a token set usable 
@@ -72,7 +72,7 @@
     // these tokens expose the iterator_range of the matched input sequence
     token_def<> if_, else_, while_;
 
-    // The following two tokens have an associated value type, 'identifier'
+    // The following two tokens have an associated attribute type, 'identifier'
     // carries a string (the identifier name) and 'constant' carries the 
     // matched integer value.
     //
@@ -81,7 +81,7 @@
     //       well (see the typedef for the token_type below).
     //
     // The conversion of the matched input to an instance of this type occurs
-    // once (on first access), which makes token values as efficient as 
+    // once (on first access), which makes token attributes as efficient as 
     // possible. Moreover, token instances are constructed once by the lexer
     // library. From this point on tokens are passed by reference only, 
     // avoiding them being copied around.
@@ -173,13 +173,13 @@
     // calculator_tokens<> above). Here we use the predefined lexertl token 
     // type, but any compatible token type may be used instead.
     //
-    // If you don't list any token value types in the following declaration 
+    // If you don't list any token attribute types in the following declaration 
     // (or just use the default token type: lexertl_token<base_iterator_type>)  
     // it will compile and work just fine, just a bit less efficient. This is  
-    // because the token value will be generated from the matched input  
+    // because the token attribute will be generated from the matched input  
     // sequence every time it is requested. But as soon as you specify at 
-    // least one token value type you'll have to list all value types used  
-    // for token_def<> declarations in the token definition class above,  
+    // least one token attribute type you'll have to list all attribute types 
+    // used for token_def<> declarations in the token definition class above, 
     // otherwise compilation errors will occur.
     typedef lexertl::token<
         base_iterator_type, boost::mpl::vector<unsigned int, std::string> 
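
For reference, the rule spelled out in the reworded comment reduces to the following sketch (distilled from the declarations described above; illustrative, not additional file content):

    // two token_def<>'s with explicit attribute types ...
    token_def<std::string>  identifier;   // carries the identifier name
    token_def<unsigned int> constant;     // carries the matched integer value

    // ... require every attribute type used to be listed in the token typedef:
    typedef lexertl::token<
        base_iterator_type, boost::mpl::vector<unsigned int, std::string>
    > token_type;

    // The plain default still compiles, but rebuilds the attribute from the
    // matched input sequence on every access:
    // typedef lexertl::token<base_iterator_type> token_type;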
Modified: trunk/libs/spirit/example/lex/example5.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/example5.cpp	(original)
+++ trunk/libs/spirit/example/lex/example5.cpp	2009-04-27 22:19:27 EDT (Mon, 27 Apr 2009)
@@ -5,8 +5,8 @@
 
 //  This example shows how to create a simple lexer recognizing a couple of 
 //  different tokens aimed at a simple language and how to use this lexer with 
-//  a grammar. It shows how to associate values to tokens and how to access the 
-//  token values from inside the grammar.
+//  a grammar. It shows how to associate attributes to tokens and how to access the 
+//  token attributes from inside the grammar.
 //
 //  Additionally, this example demonstrates, how to define a token set usable 
 //  as the skip parser during parsing, allowing to define several tokens to be 
@@ -86,7 +86,7 @@
     //       well (see the typedef for the token_type below).
     //
     // The conversion of the matched input to an instance of this type occurs
-    // once (on first access), which makes token values as efficient as 
+    // once (on first access), which makes token attributes as efficient as 
     // possible. Moreover, token instances are constructed once by the lexer
     // library. From this point on tokens are passed by reference only, 
     // avoiding them being copied around.
@@ -186,7 +186,7 @@
         this->base_type::init_token_definitions();
     }
 
-    // this token has no value
+    // this token has no attribute
     token_def<omitted> else_;
 };
 
@@ -218,13 +218,13 @@
     // example5_base_tokens<> above). Here we use the predefined lexertl token 
     // type, but any compatible token type may be used instead.
     //
-    // If you don't list any token value types in the following declaration 
+    // If you don't list any token attribute types in the following declaration 
     // (or just use the default token type: lexertl_token<base_iterator_type>)  
     // it will compile and work just fine, just a bit less efficient. This is  
-    // because the token value will be generated from the matched input  
+    // because the token attribute will be generated from the matched input  
     // sequence every time it is requested. But as soon as you specify at 
-    // least one token value type you'll have to list all value types used  
-    // for token_def<> declarations in the token definition class above,  
+    // least one token attribute type you'll have to list all attribute types 
+    // used for token_def<> declarations in the token definition class above,  
     // otherwise compilation errors will occur.
     typedef lexertl::token<
         base_iterator_type, boost::mpl::vector<unsigned int, std::string> 
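
The "token has no attribute" case is the other end of the same spectrum; a minimal sketch, with the behaviour as described in the comments above:

    token_def<>        if_;      // attribute: iterator_range of the matched input
    token_def<omitted> else_;    // no attribute at all, not even the iterator_range

    // Used as a parser component, 'else_' therefore contributes nothing to the
    // attribute of the surrounding grammar expression.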
Modified: trunk/libs/spirit/example/lex/example6.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/example6.cpp	(original)
+++ trunk/libs/spirit/example/lex/example6.cpp	2009-04-27 22:19:27 EDT (Mon, 27 Apr 2009)
@@ -5,8 +5,8 @@
 
 //  This example shows how to create a simple lexer recognizing a couple of 
 //  different tokens aimed at a simple language and how to use this lexer with 
-//  a grammar. It shows how to associate values to tokens and how to access the 
-//  token values from inside the grammar.
+//  a grammar. It shows how to associate attributes to tokens and how to access the 
+//  token attributes from inside the grammar.
 //
 //  Additionally, this example demonstrates, how to define a token set usable 
 //  as the skip parser during parsing, allowing to define several tokens to be 
@@ -94,7 +94,7 @@
         this->self("WS") = white_space;
     }
 
-    // The following two tokens have an associated value type, identifier 
+    // The following two tokens have an associated attribute type, identifier 
     // carries a string (the identifier name) and constant carries the matched 
     // integer value.
     //
@@ -103,7 +103,7 @@
     //       well (see the typedef for the token_type below).
     //
     // The conversion of the matched input to an instance of this type occurs
-    // once (on first access), which makes token values as efficient as 
+    // once (on first access), which makes token attributes as efficient as 
     // possible. Moreover, token instances are constructed once by the lexer
     // library. From this point on tokens are passed by reference only, 
     // avoiding them being copied around.
@@ -196,13 +196,13 @@
     // calculator_tokens<> above). Here we use the predefined lexertl token 
     // type, but any compatible token type may be used instead.
     //
-    // If you don't list any token value types in the following declaration 
+    // If you don't list any token attribute types in the following declaration 
     // (or just use the default token type: lexertl_token<base_iterator_type>)  
     // it will compile and work just fine, just a bit less efficient. This is  
-    // because the token value will be generated from the matched input  
+    // because the token attribute will be generated from the matched input  
     // sequence every time it is requested. But as soon as you specify at 
-    // least one token value type you'll have to list all value types used  
-    // for token_def<> declarations in the token definition class above,  
+    // least one token attribute type you'll have to list all attribute types 
+    // used for token_def<> declarations in the token definition class above,  
     // otherwise compilation errors will occur.
     typedef lexertl::token<
         base_iterator_type, boost::mpl::vector<unsigned int, std::string> 
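
The "token set usable as the skip parser" these comments keep referring to ties together as follows; a sketch only, assuming the usual qi::in_state() skipper and illustrative names (tokens, calc, iter, end):

    // whitespace and comment tokens go into their own lexer state "WS" ...
    this->self("WS") = white_space;

    // ... which is then consumed as the skipper while phrase parsing:
    bool r = qi::phrase_parse(iter, end, calc, qi::in_state("WS")[tokens.self]);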
Modified: trunk/libs/spirit/example/lex/print_numbers.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/print_numbers.cpp	(original)
+++ trunk/libs/spirit/example/lex/print_numbers.cpp	2009-04-27 22:19:27 EDT (Mon, 27 Apr 2009)
@@ -78,7 +78,7 @@
     typedef std::string::iterator base_iterator_type;
 
     // the token type to be used, 'int' is available as the type of the token 
-    // value and no lexer state is supported
+    // attribute and no lexer state is supported
     typedef lexertl::token<base_iterator_type, boost::mpl::vector<int>
       , boost::mpl::false_> token_type;
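
Side by side, the two shapes of this typedef (a sketch; the third template parameter controls lexer-state support, and leaving it at its default, as the other examples above do, keeps state support enabled):

    // 'int' available as the token attribute type, lexer states disabled:
    typedef lexertl::token<
        base_iterator_type, boost::mpl::vector<int>, boost::mpl::false_
    > token_type;

    // same attribute list with lexer-state support kept enabled:
    typedef lexertl::token<
        base_iterator_type, boost::mpl::vector<int>, boost::mpl::true_
    > stateful_token_type;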
 
Modified: trunk/libs/spirit/example/lex/word_count.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/word_count.cpp	(original)
+++ trunk/libs/spirit/example/lex/word_count.cpp	2009-04-27 22:19:27 EDT (Mon, 27 Apr 2009)
@@ -127,7 +127,7 @@
 int main(int argc, char* argv[])
 {
 /*< define the token type to be used: `std::string` is available as the 
-     type of the token value 
+     type of the token attribute 
 >*/  typedef lexertl::token<
         char const*, boost::mpl::vector<std::string>
     > token_type;
Modified: trunk/libs/spirit/example/lex/word_count_functor.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/word_count_functor.cpp	(original)
+++ trunk/libs/spirit/example/lex/word_count_functor.cpp	2009-04-27 22:19:27 EDT (Mon, 27 Apr 2009)
@@ -124,8 +124,8 @@
         switch (t.id()) {
         case ID_WORD:       // matched a word
         // since we're using a default token type in this example, every 
-        // token instance contains a `iterator_range<BaseIterator>` as its 
-        // token value pointing to the matched character sequence in the input 
+        // token instance contains an `iterator_range<BaseIterator>` as its token
+        // attribute pointing to the matched character sequence in the input 
             ++w; c += t.value().size(); 
             break;
         case ID_EOL:        // matched a newline character
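
Since t.value() returns that iterator_range<BaseIterator>, turning the match into text is a one-liner when needed; a sketch ('matched' is an illustrative name):

    // copy the matched characters out of the input buffer only when the
    // text itself is required; counting, as above, needs no copy at all:
    std::string matched(t.value().begin(), t.value().end());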
Modified: trunk/libs/spirit/example/lex/word_count_lexer.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/word_count_lexer.cpp	(original)
+++ trunk/libs/spirit/example/lex/word_count_lexer.cpp	2009-04-27 22:19:27 EDT (Mon, 27 Apr 2009)
@@ -91,8 +91,8 @@
     // read input from the given file
     std::string str (read_from_file(1 == argc ? "word_count.input" : argv[1]));
 
-    // Specifying 'omitted' as the token value type generates a token class not
-    // holding any token value at all (not even the iterator_range of the 
+    // Specifying 'omitted' as the token attribute type generates a token class 
+    // not holding any token attribute at all (not even the iterator_range of the 
     // matched input sequence), therefore optimizing the token, the lexer, and 
     // possibly the parser implementation as much as possible. 
     //