latest state of sources

Fixes #4982

[SVN r75007]
This commit is contained in:
Gennadiy Rozental 2011-10-17 11:13:55 +00:00
parent 743a0220a2
commit dbec26921f
29 changed files with 2762 additions and 1425 deletions

View File

@ -1,146 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:attributeGroup name="TestUnitAttributes">
<xs:attribute name="name" type="xs:string" use="required">
<xs:annotation>
<xs:documentation>name of the test unit</xs:documentation>
</xs:annotation>
</xs:attribute>
<xs:attribute name="skipped" type="xs:string" use="optional" fixed="yes">
<xs:annotation>
<xs:documentation>Specified with value "yes" only if test unit was skipped during execution</xs:documentation>
</xs:annotation>
</xs:attribute>
</xs:attributeGroup>
<xs:complexType name="LogEntry">
<xs:simpleContent>
<xs:extension base="xs:string">
<xs:attribute name="line" type="xs:integer" use="required">
<xs:annotation>
<xs:documentation>Line number corresponding to the log entry</xs:documentation>
</xs:annotation>
</xs:attribute>
<xs:attribute name="file" type="xs:anyURI" use="required">
<xs:annotation>
<xs:documentation>file name corresponding to the log entry</xs:documentation>
</xs:annotation>
</xs:attribute>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:complexType name="ExceptionEntry" mixed="true">
<xs:annotation>
<xs:documentation>Exception description</xs:documentation>
</xs:annotation>
<xs:sequence>
<xs:element name="LastCheckpoint" type="LogEntry">
<xs:annotation>
<xs:documentation>Location of last checkpoint before exception occured</xs:documentation>
</xs:annotation>
</xs:element>
</xs:sequence>
<xs:attribute name="name" type="xs:string" use="optional">
<xs:annotation>
<xs:documentation>obsolete?</xs:documentation>
</xs:annotation>
</xs:attribute>
</xs:complexType>
<xs:element name="TestCase">
<xs:complexType>
<xs:sequence>
<xs:choice minOccurs="0" maxOccurs="unbounded">
<xs:annotation>
<xs:documentation>different log entries</xs:documentation>
</xs:annotation>
<xs:element name="Info" type="LogEntry">
<xs:annotation>
<xs:documentation>Log entry corresponding to the successfully passed assertion</xs:documentation>
</xs:annotation>
</xs:element>
<xs:element name="Message" type="LogEntry">
<xs:annotation>
<xs:documentation>Log entry corresponding to the message generated during test execution</xs:documentation>
</xs:annotation>
</xs:element>
<xs:element name="Warning" type="LogEntry">
<xs:annotation>
<xs:documentation>Log entry corresponding to the warning generated during test execution</xs:documentation>
</xs:annotation>
</xs:element>
<xs:element name="Error" type="LogEntry">
<xs:annotation>
<xs:documentation>Log entry corresponding to the non-fatal error occured during test execution</xs:documentation>
</xs:annotation>
</xs:element>
<xs:element name="FatalError" type="LogEntry">
<xs:annotation>
<xs:documentation>Log entry corresponding to the fatal error occured during test execution</xs:documentation>
</xs:annotation>
</xs:element>
<xs:element name="Exception" type="ExceptionEntry">
<xs:annotation>
<xs:documentation>Log entry corresponding to an exception occured during test execution</xs:documentation>
</xs:annotation>
</xs:element>
</xs:choice>
<xs:element name="TestingTime" type="xs:float">
<xs:annotation>
<xs:documentation>approximate time spent on test unit execution</xs:documentation>
</xs:annotation>
</xs:element>
</xs:sequence>
<xs:attributeGroup ref="TestUnitAttributes"/>
</xs:complexType>
</xs:element>
<xs:element name="TestSuite">
<xs:complexType>
<xs:sequence>
<xs:annotation>
<xs:documentation>List of test units composing test suite</xs:documentation>
</xs:annotation>
<xs:choice minOccurs="0" maxOccurs="unbounded">
<xs:element ref="TestCase"/>
<xs:element ref="TestSuite"/>
</xs:choice>
</xs:sequence>
<xs:attributeGroup ref="TestUnitAttributes"/>
</xs:complexType>
</xs:element>
<xs:element name="TestLog">
<xs:annotation>
<xs:documentation>Holds whole unit test log content</xs:documentation>
</xs:annotation>
<xs:complexType>
<xs:sequence>
<xs:element name="BuildInfo" minOccurs="0">
<xs:annotation>
<xs:documentation>Optional build information for the unit test. </xs:documentation>
</xs:annotation>
<xs:complexType>
<xs:attribute name="platform" type="xs:string">
<xs:annotation>
<xs:documentation>unique identifier for the platform unit test was compiled on</xs:documentation>
</xs:annotation>
</xs:attribute>
<xs:attribute name="compiler" type="xs:string">
<xs:annotation>
<xs:documentation>unique identifier for the compiler unit test was compiled with</xs:documentation>
</xs:annotation>
</xs:attribute>
<xs:attribute name="stl" type="xs:string">
<xs:annotation>
<xs:documentation>unique identifier for the STL implementation used during unit test compilation</xs:documentation>
</xs:annotation>
</xs:attribute>
<xs:attribute name="boost" type="xs:string">
<xs:annotation>
<xs:documentation>version of the boost used</xs:documentation>
</xs:annotation>
</xs:attribute>
</xs:complexType>
</xs:element>
<xs:element ref="TestSuite"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>

View File

@ -1,82 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- edited with XMLSpy v2007 (http://www.altova.com) by rogeeff (boost) -->
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:simpleType name="StatusType">
<xs:restriction base="xs:string">
<xs:enumeration value="passed"/>
<xs:enumeration value="skipped"/>
<xs:enumeration value="aborted"/>
<xs:enumeration value="failed"/>
</xs:restriction>
</xs:simpleType>
<xs:attributeGroup name="TestUnitResult">
<xs:attribute name="name" type="xs:string">
<xs:annotation>
<xs:documentation>name of the test unit</xs:documentation>
</xs:annotation>
</xs:attribute>
<xs:attribute name="result" type="StatusType">
<xs:annotation>
<xs:documentation>result status for the test unit: passed, failed, skipped or aborted</xs:documentation>
</xs:annotation>
</xs:attribute>
<xs:attribute name="assertions_passed" type="xs:string">
<xs:annotation>
<xs:documentation>number of assertions that passed during execution of the test unit</xs:documentation>
</xs:annotation>
</xs:attribute>
<xs:attribute name="assertions_failed" type="xs:string">
<xs:annotation>
<xs:documentation>number of assertion that failed during execution of the test unit</xs:documentation>
</xs:annotation>
</xs:attribute>
<xs:attribute name="expected_failures" type="xs:string">
<xs:annotation>
<xs:documentation>number of assertions that expected to fail in the test unit</xs:documentation>
</xs:annotation>
</xs:attribute>
</xs:attributeGroup>
<xs:element name="TestCase">
<xs:complexType>
<xs:attributeGroup ref="TestUnitResult"/>
</xs:complexType>
</xs:element>
<xs:element name="TestSuite">
<xs:complexType>
<xs:sequence>
<xs:choice minOccurs="0" maxOccurs="unbounded">
<xs:element ref="TestSuite"/>
<xs:element ref="TestCase"/>
</xs:choice>
</xs:sequence>
<xs:attributeGroup ref="TestUnitResult"/>
<xs:attribute name="test_cases_passed" type="xs:integer">
<xs:annotation>
<xs:documentation>number of test cases that passed in the test suite</xs:documentation>
</xs:annotation>
</xs:attribute>
<xs:attribute name="test_cases_failed" type="xs:integer">
<xs:annotation>
<xs:documentation>number of test cases that failed in the test suite</xs:documentation>
</xs:annotation>
</xs:attribute>
<xs:attribute name="test_cases_skipped" type="xs:integer">
<xs:annotation>
<xs:documentation>number of test cases that were skipped in the test suite</xs:documentation>
</xs:annotation>
</xs:attribute>
<xs:attribute name="test_cases_aborted" type="xs:integer">
<xs:annotation>
<xs:documentation>number of test cases in the test suite that were aborted during execution by an exception or a fatal error</xs:documentation>
</xs:annotation>
</xs:attribute>
</xs:complexType>
</xs:element>
<xs:element name="TestResult">
<xs:complexType>
<xs:sequence>
<xs:element ref="TestSuite"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>

View File

@ -20,38 +20,6 @@
</tocentry>
</tocentry>
<tocentry linkend="execution-monitor">
<?dbhtml filename="execution-monitor.html"?>
<tocentry linkend="execution-monitor.compilation">
<?dbhtml filename="execution-monitor/compilation.html"?>
</tocentry>
<tocentry linkend="execution-monitor.user-guide">
<?dbhtml filename="execution-monitor/user-guide.html"?>
</tocentry>
<tocentry linkend="execution-monitor.reference">
<?dbhtml filename="execution-monitor/reference.html"?>
</tocentry>
</tocentry>
<tocentry linkend="pem">
<?dbhtml filename="prg-exec-monitor.html"?>
<tocentry linkend="pem.impl">
<?dbhtml filename="prg-exec-monitor/impl.html"?>
</tocentry>
<tocentry linkend="pem.compilation">
<?dbhtml filename="prg-exec-monitor/compilation.html"?>
</tocentry>
</tocentry>
<tocentry linkend="minimal">
<?dbhtml filename="minimal.html"?>
</tocentry>
<tocentry linkend="utf">
<?dbhtml filename="utf.html"?>
<tocentry linkend="utf.intro">
@ -104,46 +72,37 @@
<?dbhtml filename="utf/user-guide/usage-variants/extern-test-runner-variant.html"?>
</tocentry>
</tocentry>
<tocentry linkend="utf.user-guide.test-runners">
<?dbhtml filename="utf/user-guide/test-runners.html"?>
<tocentry linkend="utf.user-guide.external-test-runner">
<?dbhtml filename="utf/user-guide/usage-variants/extern-test-runner.html"?>
</tocentry>
</tocentry>
<tocentry linkend="utf.user-guide.initialization">
<?dbhtml filename="utf/user-guide/initialization.html"?>
</tocentry>
<tocentry linkend="utf.user-guide.test-organization">
<?dbhtml filename="utf/user-guide/test-organization.html"?>
<tocentry linkend="utf.user-guide.test-organization.nullary-test-case">
<?dbhtml filename="utf/user-guide/test-organization/nullary-test-case.html"?>
<tocentry linkend="utf.user-guide.test-organization.manual-nullary-test-case">
<?dbhtml filename="utf/user-guide/test-organization/manual-nullary-test-case.html"?>
</tocentry>
<tocentry linkend="utf.user-guide.test-organization.auto-nullary-test-case">
<?dbhtml filename="utf/user-guide/test-organization/auto-nullary-test-case.html"?>
</tocentry>
<tocentry linkend="utf.user-guide.test-organization.manual-nullary-test-case">
<?dbhtml filename="utf/user-guide/test-organization/manual-nullary-test-case.html"?>
</tocentry>
</tocentry>
<tocentry linkend="utf.user-guide.test-organization.unary-test-case">
<?dbhtml filename="utf/user-guide/test-organization/unary-test-case.html"?>
</tocentry>
<tocentry linkend="utf.user-guide.test-organization.test-case-template">
<?dbhtml filename="utf/user-guide/test-organization/test-case-template.html"?>
<tocentry linkend="utf.user-guide.test-organization.manual-test-case-template">
<?dbhtml filename="utf/user-guide/test-organization/manual-test-case-template.html"?>
</tocentry>
<tocentry linkend="utf.user-guide.test-organization.auto-test-case-template">
<?dbhtml filename="utf/user-guide/test-organization/auto-test-case-template.html"?>
</tocentry>
<tocentry linkend="utf.user-guide.test-organization.manual-test-case-template">
<?dbhtml filename="utf/user-guide/test-organization/manual-test-case-template.html"?>
</tocentry>
</tocentry>
<tocentry linkend="utf.user-guide.test-organization.test-suite">
<?dbhtml filename="utf/user-guide/test-organization/test-suite.html"?>
<tocentry linkend="utf.user-guide.test-organization.manual-test-suite">
<?dbhtml filename="utf/user-guide/test-organization/manual-test-suite.html"?>
</tocentry>
<tocentry linkend="utf.user-guide.test-organization.auto-test-suite">
<?dbhtml filename="utf/user-guide/test-organization/auto-test-suite.html"?>
</tocentry>
<tocentry linkend="utf.user-guide.test-organization.manual-test-suite">
<?dbhtml filename="utf/user-guide/test-organization/manual-test-suite.html"?>
</tocentry>
<tocentry linkend="utf.user-guide.test-organization.master-test-suite">
<?dbhtml filename="utf/user-guide/test-organization/master-test-suite.html"?>
</tocentry>
@ -152,21 +111,27 @@
<?dbhtml filename="utf/user-guide/test-organization/expected-failures.html"?>
</tocentry>
</tocentry>
<tocentry linkend="utf.user-guide.fixture">
<?dbhtml filename="utf/user-guide/fixture.html"?>
<tocentry linkend="utf.user-guide.fixture.model">
<?dbhtml filename="utf/user-guide/fixture/model.html"?>
<tocentry linkend="utf.user-guide.testing-tools">
<?dbhtml filename="utf/user-guide/testing-tools.html"?>
<tocentry linkend="utf.user-guide.testing-tools.output-test">
<?dbhtml filename="utf/user-guide/testing-tools/output-test.html"?>
</tocentry>
<tocentry linkend="utf.user-guide.fixture.per-test-case">
<?dbhtml filename="utf/user-guide/fixture/per-test-case.html"?>
<tocentry linkend="utf.user-guide.testing-tools.custom-predicate">
<?dbhtml filename="utf/user-guide/testing-tools/custom-predicate.html"?>
</tocentry>
<tocentry linkend="utf.user-guide.fixture.test-suite-shared">
<?dbhtml filename="utf/user-guide/fixture/test-suite-shared.html"?>
<tocentry linkend="utf.user-guide.testing-tools.fpv-comparison">
<?dbhtml filename="utf/user-guide/testing-tools/floating_point_comparison.html"?>
</tocentry>
<tocentry linkend="utf.user-guide.fixture.global">
<?dbhtml filename="utf/user-guide/fixture/global.html"?>
<tocentry linkend="utf.user-guide.testing-tools.reference">
<?dbhtml filename="utf/user-guide/testing-tools/reference.html"?>
</tocentry>
</tocentry>
<tocentry linkend="utf.user-guide.test-output">
<?dbhtml filename="utf/user-guide/test-output.html"?>
<tocentry linkend="utf.user-guide.test-output.log">
@ -211,25 +176,32 @@
<?dbhtml filename="utf/user-guide/runtime-config/reference.html"?>
</tocentry>
</tocentry>
</tocentry>
<tocentry linkend="utf.testing-tools">
<?dbhtml filename="utf/testing-tools.html"?>
<tocentry linkend="utf.testing-tools.output-test">
<?dbhtml filename="utf/testing-tools/output-test.html"?>
<tocentry linkend="utf.user-guide.fixture">
<?dbhtml filename="utf/user-guide/fixture.html"?>
<tocentry linkend="utf.user-guide.fixture.model">
<?dbhtml filename="utf/user-guide/fixture/model.html"?>
</tocentry>
<tocentry linkend="utf.user-guide.fixture.per-test-case">
<?dbhtml filename="utf/user-guide/fixture/per-test-case.html"?>
</tocentry>
<tocentry linkend="utf.user-guide.fixture.test-suite-shared">
<?dbhtml filename="utf/user-guide/fixture/test-suite-shared.html"?>
</tocentry>
<tocentry linkend="utf.user-guide.fixture.global">
<?dbhtml filename="utf/user-guide/fixture/global.html"?>
</tocentry>
</tocentry>
<tocentry linkend="utf.testing-tools.custom-predicate">
<?dbhtml filename="utf/testing-tools/custom-predicate.html"?>
<tocentry linkend="utf.user-guide.initialization">
<?dbhtml filename="utf/user-guide/initialization.html"?>
</tocentry>
<tocentry linkend="utf.testing-tools.fpv-comparison">
<?dbhtml filename="utf/testing-tools/floating_point_comparison.html"?>
<tocentry linkend="utf.user-guide.test-runners">
<?dbhtml filename="utf/user-guide/test-runners.html"?>
<tocentry linkend="utf.user-guide.external-test-runner">
<?dbhtml filename="utf/user-guide/usage-variants/extern-test-runner.html"?>
</tocentry>
</tocentry>
<tocentry linkend="utf.testing-tools.reference">
<?dbhtml filename="utf/testing-tools/reference.html"?>
<tocentry linkend="utf.user-guide.glossary">
<?dbhtml filename="utf/user-guide/glossary.html"?>
</tocentry>
</tocentry>
@ -248,6 +220,43 @@
<?dbhtml filename="utf/usage-recommendations/command-line-specific.html"?>
</tocentry>
</tocentry>
<tocentry linkend="utf.examples">
<?dbhtml filename="utf/examples-collection.html"?>
</tocentry>
</tocentry>
<tocentry linkend="execution-monitor">
<?dbhtml filename="execution-monitor.html"?>
<tocentry linkend="execution-monitor.compilation">
<?dbhtml filename="execution-monitor/compilation.html"?>
</tocentry>
<tocentry linkend="execution-monitor.user-guide">
<?dbhtml filename="execution-monitor/user-guide.html"?>
</tocentry>
<tocentry linkend="execution-monitor.reference">
<?dbhtml filename="execution-monitor/reference.html"?>
</tocentry>
</tocentry>
<tocentry linkend="pem">
<?dbhtml filename="prg-exec-monitor.html"?>
<tocentry linkend="pem.impl">
<?dbhtml filename="prg-exec-monitor/impl.html"?>
</tocentry>
<tocentry linkend="pem.compilation">
<?dbhtml filename="prg-exec-monitor/compilation.html"?>
</tocentry>
</tocentry>
<tocentry linkend="minimal">
<?dbhtml filename="minimal.html"?>
</tocentry>
</tocentry>
</toc>

View File

@ -18,6 +18,9 @@
<year>2006</year>
<year>2007</year>
<year>2008</year>
<year>2009</year>
<year>2010</year>
<year>2011</year>
<holder>Gennadiy Rozental</holder>
</copyright>
@ -156,10 +159,10 @@
</section>
</section>
<xi:include href="utf.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="execution-monitor.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="program-execution-monitor.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="minimal-testing.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="utf.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<!-- TO FIX: index -->
</library>

View File

@ -1,7 +1,7 @@

Microsoft Visual Studio Solution File, Format Version 9.00
# Visual Studio 2005
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "example", "example.vcproj", "{9C8197C8-60ED-4D4F-9FFE-F3DFC4C57AE5}"
Microsoft Visual Studio Solution File, Format Version 11.00
# Visual C++ Express 2010
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "example", "example.vcxproj", "{9C8197C8-60ED-4D4F-9FFE-F3DFC4C57AE5}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution

View File

@ -1,196 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<VisualStudioProject
ProjectType="Visual C++"
Version="8.00"
Name="example"
ProjectGUID="{9C8197C8-60ED-4D4F-9FFE-F3DFC4C57AE5}"
Keyword="Win32Proj"
>
<Platforms>
<Platform
Name="Win32"
/>
</Platforms>
<ToolFiles>
</ToolFiles>
<Configurations>
<Configuration
Name="Debug|Win32"
OutputDirectory="Debug"
IntermediateDirectory="Debug"
ConfigurationType="1"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
/>
<Tool
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="../../../../../"
PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE;"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="3"
UsePrecompiledHeader="0"
WarningLevel="3"
Detect64BitPortabilityProblems="true"
DebugInformationFormat="4"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
LinkIncremental="2"
GenerateDebugInformation="true"
SubSystem="1"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCWebDeploymentTool"
/>
<Tool
Name="VCPostBuildEventTool"
CommandLine="&quot;$(TargetDir)\$(TargetName).exe&quot; --result_code=no"
/>
</Configuration>
<Configuration
Name="Release|Win32"
OutputDirectory="Release"
IntermediateDirectory="Release"
ConfigurationType="1"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
/>
<Tool
Name="VCCLCompilerTool"
AdditionalIncludeDirectories="../../../../../"
PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE;"
RuntimeLibrary="2"
UsePrecompiledHeader="0"
WarningLevel="3"
Detect64BitPortabilityProblems="true"
DebugInformationFormat="3"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
LinkIncremental="2"
GenerateDebugInformation="true"
SubSystem="1"
OptimizeReferences="2"
EnableCOMDATFolding="2"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCWebDeploymentTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
</Configurations>
<References>
</References>
<Files>
<Filter
Name="Header Files"
Filter="h;hpp;hxx;hm;inl;inc;xsd"
UniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}"
>
</Filter>
<Filter
Name="Resource Files"
Filter="rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx"
UniqueIdentifier="{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}"
>
</Filter>
<Filter
Name="Source Files"
Filter="cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx"
UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}"
>
<File
RelativePath="..\snippet\snippet18.cpp"
>
</File>
</Filter>
</Files>
<Globals>
</Globals>
</VisualStudioProject>

View File

@ -0,0 +1,89 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
</ItemGroup>
<ItemGroup>
<ClCompile Include="example38.cpp" />
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{9C8197C8-60ED-4D4F-9FFE-F3DFC4C57AE5}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup>
<_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
<OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Debug\</OutDir>
<IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Debug\</IntDir>
<LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</LinkIncremental>
<OutDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Release\</OutDir>
<IntDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Release\</IntDir>
<LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</LinkIncremental>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<Optimization>Disabled</Optimization>
<AdditionalIncludeDirectories>../../../../../;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<MinimalRebuild>true</MinimalRebuild>
<BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
<RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<DebugInformationFormat>EditAndContinue</DebugInformationFormat>
</ClCompile>
<Link>
<GenerateDebugInformation>true</GenerateDebugInformation>
<SubSystem>Console</SubSystem>
<TargetMachine>MachineX86</TargetMachine>
</Link>
<PostBuildEvent>
<Command>"$(TargetDir)\$(TargetName).exe" --result_code=no</Command>
</PostBuildEvent>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<AdditionalIncludeDirectories>../../../../../;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
</ClCompile>
<Link>
<GenerateDebugInformation>true</GenerateDebugInformation>
<SubSystem>Console</SubSystem>
<OptimizeReferences>true</OptimizeReferences>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<TargetMachine>MachineX86</TargetMachine>
</Link>
</ItemDefinitionGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>

View File

@ -3,7 +3,7 @@
//____________________________________________________________________________//
int foo() { throw std::runtime_exception( "big trouble" ); }
int foo() { throw std::runtime_error( "big trouble" ); }
//____________________________________________________________________________//

View File

@ -7,9 +7,9 @@
BOOST_AUTO_TEST_CASE( test )
{
double res = std::sin( 45. );
double res = std::sin( 45. ); // sin 45 radians is actually ~ 0.85, sin 45 degrees is ~0.707
BOOST_WARN_MESSAGE( res > 1, "sin(45){" << res << "} is <= 1. Hmm.. Strange. " );
BOOST_WARN_MESSAGE( res < 0.71, "sin(45){" << res << "} is > 0.71. Arg is not in radian?" );
}
//____________________________________________________________________________//

View File

@ -1,5 +1,5 @@
> example --log_level=warning
Running 1 test case...
test.cpp(12): warning in "test": sin(45){0.850904} is <= 1. Hmm.. Strange.
test.cpp(12): warning in "test": sin(45){0.850904} is > 0.71. Arg is not in radian?
*** No errors detected

View File

@ -256,7 +256,7 @@
second and so on). Unfortunately this feature is, at the moment, implemented only for the Microsoft family of
compilers (and Intel, if it employs Microsoft C Runtime Library). Also it can not be tuned per instance of the
monitor and is only triggered globally and reported after the whole program execution is done. In a future this
ought to be improved. An interface is composed from two free functions residing in namespace boost:
ought to be improved. An interface is composed from two free functions residing in namespace boost::debug:
</para>
<!-- TO FIX -->
@ -266,8 +266,8 @@ void break_memory_alloc( long mem_alloc_order_num );</programlisting>
<para role="first-line-indented">
Use function detect_memory_leaks to switch memory leaks detection on/off. Use break_memory_alloc to break a
program execution at allocation specified by mem_alloc_order_num argument. The Unit Test Framework
provides a runtime parameter (--detect_memory_leak=yes or no) allowing you to manage this feature during monitored
unit tests.
provides a runtime parameter (--detect_memory_leaks=0 or 1 or N>1, where N is memory allocation number)
allowing you to manage this feature during monitored unit tests.
</para>
</section>
</section>

View File

@ -30,7 +30,7 @@
<answer>
<para role="first-line-indented">
You can send a bug report to the boost users' mailing list and/or directly to
<ulink url="mailto:boost-test -at- emailacocunt -dot- com">Gennadiy Rozental</ulink>.
<ulink url="mailto:boost-test =at= emailaccount =dot= com">Gennadiy Rozental</ulink>.
</para>
</answer>
</qandaentry>
@ -44,7 +44,7 @@
<answer>
<para role="first-line-indented">
You can send a request to the boost developers' mailing list and/or directly to
<ulink url="mailto:boost-test -at- emailacocunt -dot- com">Gennadiy Rozental</ulink>.
<ulink url="mailto:boost-test =at= emailaccount -dot- com">Gennadiy Rozental</ulink>.
</para>
</answer>
</qandaentry>

View File

@ -14,7 +14,7 @@
original version of Boost.Test. As the name suggest, it provides only minimal basic facilities for test creation. It
has no configuration parameters (either command line arguments or environment variables) and it supplies
a limited set of <link linkend="minimal.tools">testing tools</link> which behave similarly to ones defined among
the Unit Test Framework <link linkend="utf.testing-tools">Testing tools</link>. The &mtf; supplies its own function
the Unit Test Framework <link linkend="utf.user-guide.testing-tools">Testing tools</link>. The &mtf; supplies its own function
main() (so can not be used for multi unit testing) and will execute the test program in a monitored environment.
</para>
@ -131,7 +131,7 @@
</inline-synopsis>
<para role="first-line-indented">
Their behavior is modeled after the <link linkend="utf.testing-tools.reference">similarly named tools</link>
Their behavior is modeled after the <link linkend="utf.user-guide.testing-tools.reference">similarly named tools</link>
implemented by the Unit Test Framework.
</para>
</section>

View File

@ -40,7 +40,7 @@
<para role="first-line-indented">
Uniform error reporting can be also useful in test environments such as the Boost regression tests. Be aware though
in such case it might be preferable to use the <link linkend="utf">Unit Test Framework</link>, cause it allows one
to use the <link linkend="utf.testing-tools">Testing tools</link> and generate more detailed error information.
to use the <link linkend="utf.user-guide.testing-tools">Testing tools</link> and generate more detailed error information.
</para>
</section>

304
doc/src/utf.examples.xml Normal file
View File

@ -0,0 +1,304 @@
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE section PUBLIC "-//Boost//DTD BoostBook XML V1.0//EN" "../../../../tools/boostbook/dtd/boostbook.dtd" [
<!ENTITY utf "<acronym>UTF</acronym>">
]>
<section id="utf.examples">
<title>The &utf; usage examples collection</title>
<titleabbrev>Examples collection</titleabbrev>
<itemizedlist mark ="square">
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-organization.auto-nullary-test-case.example06"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-organization.manual-nullary-test-case.example01"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-organization.manual-nullary-test-case.example02"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-organization.manual-nullary-test-case.example03"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-organization.manual-nullary-test-case.example04"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-organization.manual-nullary-test-case.example05"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-organization.unary-test-case.example07"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-organization.unary-test-case.example08"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-organization.auto-test-case-template.example10"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-organization.manual-test-case-template.example09"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-organization.auto-test-suite.example12"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-organization.manual-test-suite.example11"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-organization.cla-access.example13"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-organization.master-test-suite-name.example14"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-organization.master-test-suite-name.example15"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-organization.manual-expected-failures.example16"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-organization.auto-expected-failures.example17"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.testing-tools.output-test.example28"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.testing-tools.output-test.example29"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.testing-tools.custom-predicate.example30"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.testing-tools.custom-predicate.example31"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend=".example33"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-output.log.testing-tool-args.example32"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-output.log.BOOST_TEST_MESSAGE.example21"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-output.log.BOOST_TEST_CHECKPOINT.example22"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-output.log.BOOST_TEST_PASSPOINT.example23"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.fixture.per-test-case.example18"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.fixture.test-suite-shared.example19"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.fixture.global.example20"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="pem.usage.example24"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="pem.usage.example25"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="pem.usage.example26"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="minimal.example.example27"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend=".example34"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend=".example35"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend=".example36"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend=".example37"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend=".example38"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend=".example39"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend=".example40"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend=".example41"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend=".example42"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend=".example43"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend=".example44"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend=".example45"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend=".example46"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend=".example47"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend=".example48"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-output.progress.example49"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-output.log.ct-config.output-stream.example50"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-output.log.ct-config.log-level.example51"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-output.log.ct-config.log-format.example52"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend="utf.user-guide.test-organization.auto-test-suite.example53"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend=".example54"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend=".example55"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend=".example56"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend=".example57"/>
</simpara>
</listitem>
<listitem>
<simpara>
<xref linkend=".example58"/>
</simpara>
</listitem>
</itemizedlist>
</section>

View File

@ -8,7 +8,9 @@
<para role="first-line-indented">
You think writing tests is difficult, annoying and fruitless work? I beg to differ. Read through these tutorials
and I am sure you will agree.
and I am sure you will agree. One other thing I suggest you take a look at is the
<link linkend="utf.compilation">compilation instructions</link>, especially if you plan to build and use the standalone
library.
</para>
<xi:include href="tutorial.intro-in-testing.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>

View File

@ -0,0 +1,235 @@
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE section PUBLIC "-//Boost//DTD BoostBook XML V1.0//EN" "../../../../tools/boostbook/dtd/boostbook.dtd" [
<!ENTITY utf "<acronym>UTF</acronym>">
]>
<section id="utf.user-guide.glossary">
<title>Introduction &hellip; or what's your name?</title><titleabbrev>Introduction</titleabbrev>
<para role="first-line-indented">
Without further ado, let's define terms regularly used by the &utf;.
</para>
<variablelist>
<?dbhtml term-width="16%" list-width="100%"?>
<?dbhtml term-separator=":"?>
<?dbhtml table-summary="utf terms definition"?>
<varlistentry id="test-module.def">
<term><firstterm>The test module</firstterm></term>
<listitem>
<simpara>
This is a single binary that performs the test. Physically a test module consists of one or more test source files,
which can be built into an executable or a dynamic library. A test module that consists of a single test source
file is called <firstterm id="single-file-test-module.def">single-file test module</firstterm>. Otherwise
it's called <firstterm id="multi-file-test-module.def">multi-file test module</firstterm>. Logically a test
module consists of four parts: <link linkend="test-setup.def">test setup</link> (or test initialization),
<link linkend="test-body.def">test body</link>, <link linkend="test-cleanup.def">test cleanup</link> and
<link linkend="test-runner.def">test runner</link>. The test runner part is optional. If a test module is built as
an executable the test runner is built-in. If a test module is built as a dynamic library, it is run by an
external test runner.
</simpara>
</listitem>
</varlistentry>
<varlistentry id="test-body.def">
<term><firstterm>The test body</firstterm></term>
<listitem>
<simpara>
This is the part of a test module that actually performs the test.
Logically test body is a collection of <link linkend="test-assertion.def">test assertions</link> wrapped in
<link linkend="test-case.def">test cases</link>, which are organized in a <link linkend="test-tree.def">test tree
</link>.
</simpara>
</listitem>
</varlistentry>
<varlistentry id="test-tree.def">
<term><firstterm>The test tree</firstterm></term>
<listitem>
<simpara>
This is a hierarchical structure of <link linkend="test-suite.def">test suites</link> (non-leaf nodes) and
<link linkend="test-case.def">test cases</link> (leaf nodes).
</simpara>
</listitem>
</varlistentry>
<varlistentry id="test-unit.def">
<term><firstterm>The test unit</firstterm></term>
<listitem>
<simpara>
This is a collective name when referred to either <link linkend="test-suite.def">test suite</link> or
<link linkend="test-case.def">test case</link>
</simpara>
</listitem>
</varlistentry>
<varlistentry id="test-assertion.def">
<term><firstterm>Test assertion</firstterm></term>
<listitem>
<simpara>
This is a single binary condition (binary in the sense that it has two outcomes: pass and fail) checked
by a test module.
</simpara>
<simpara>
There are different schools of thought on how many test assertions a test case should consist of. Two polar
positions are the one advocated by TDD followers - one assertion per test case; and opposite of this - all test
assertions within single test case - advocated by those only interested in the first error in a
test module. The &utf; supports both approaches.
</simpara>
</listitem>
</varlistentry>
<varlistentry id="test-case.def">
<term><firstterm>The test case</firstterm></term>
<listitem>
<simpara>
This is an independently monitored function within a test module that
consists of one or more test assertions. The term &quot;independently monitored&quot; in the definition above is
used to emphasize the fact, that all test cases are monitored independently. An uncaught exception or other normal
test case execution termination doesn't cause the testing to cease. Instead the error is caught by the test
case execution monitor, reported by the &utf; and testing proceeds to the next test case. Later on you are going
to see that this is one of the primary reasons to prefer multiple small test cases to a single big test function.
</simpara>
</listitem>
</varlistentry>
<varlistentry id="test-suite.def">
<term><firstterm>The test suite</firstterm></term>
<listitem>
<simpara>
This is a container for one or more test cases. The test suite gives you an ability to group
test cases into a single referable entity. There are various reasons why you may opt to do so, including:
</simpara>
<itemizedlist>
<listitem>
<simpara>To group test cases per subsystems of the unit being tested.</simpara>
</listitem>
<listitem>
<simpara>To share test case setup/cleanup code.</simpara>
</listitem>
<listitem>
<simpara>To run selected group of test cases only.</simpara>
</listitem>
<listitem>
<simpara>To see test report split by groups of test cases</simpara>
</listitem>
<listitem>
<simpara>To skip groups of test cases based on the result of another test unit in a test tree.</simpara>
</listitem>
</itemizedlist>
<simpara>
A test suite can also contain other test suites, thus allowing a hierarchical test tree structure to be formed.
The &utf; requires the test tree to contain at least one test suite with at least one test case. The top level
test suite - root node of the test tree - is called the master test suite.
</simpara>
</listitem>
</varlistentry>
<varlistentry id="test-setup.def">
<term><firstterm>The test setup</firstterm></term>
<listitem>
<simpara>
This is the part of a test module that is responsible for the test
preparation. It includes the following operations that take place prior to a start of the test:
</simpara>
<itemizedlist>
<listitem>
<simpara>
The &utf; initialization
</simpara>
</listitem>
<listitem>
<simpara>
Test tree construction
</simpara>
</listitem>
<listitem>
<simpara>
Global test module setup code
</simpara>
</listitem>
</itemizedlist>
<simpara>
&quot;Per test case&quot; setup code, invoked for every test case it's assigned to, is also attributed to the
test initialization, even though it's executed as a part of the test case.
</simpara>
</listitem>
</varlistentry>
<varlistentry id="test-cleanup.def">
<term><firstterm>The test cleanup</firstterm></term>
<listitem>
<simpara>
This is the part of test module that is responsible for cleanup operations.
</simpara>
</listitem>
</varlistentry>
<varlistentry id="test-fixture.def">
<term><firstterm>The test fixture</firstterm></term>
<listitem>
<simpara>
Matching setup and cleanup operations are frequently united into a single entity called test fixture.
</simpara>
</listitem>
</varlistentry>
<varlistentry id="test-runner.def">
<term><firstterm>The test runner</firstterm></term>
<listitem>
<simpara>
This is an &quot;executive manager&quot; that runs the show. The test runner's functionality should include
the following interfaces and operations:
</simpara>
<itemizedlist>
<listitem>
<simpara>
Entry point to a test module. This is usually either the function main() itself or single function that can be
invoked from it to start testing.
</simpara>
</listitem>
<listitem>
<simpara>
Initialize the &utf; based on runtime parameters
</simpara>
</listitem>
<listitem>
<simpara>
Select an output media for the test log and the test results report
</simpara>
</listitem>
<listitem>
<simpara>
Select test cases to execute based on runtime parameters
</simpara>
</listitem>
<listitem>
<simpara>
Execute all or selected test cases
</simpara>
</listitem>
<listitem>
<simpara>
Produce the test results report
</simpara>
</listitem>
<listitem>
<simpara>
Generate a test module result code.
</simpara>
</listitem>
</itemizedlist>
<para role="first-line-indented">
An advanced test runner may provide additional features, including interactive <acronym>GUI</acronym> interfaces,
test coverage and profiling support.
</para>
</listitem>
</varlistentry>
<varlistentry id="test-log.def">
<term><firstterm>The test log</firstterm></term>
<listitem>
<simpara>
This is the record of all events that occur during the testing.
</simpara>
</listitem>
</varlistentry>
<varlistentry id="test-results-report.def">
<term><firstterm>The test results report</firstterm></term>
<listitem>
<simpara>
This is the report produced by the &utf; after the testing is completed, that indicates which test cases/test
suites passed and which failed.
</simpara>
</listitem>
</varlistentry>
</variablelist>
</section>

View File

@ -0,0 +1,150 @@
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE section PUBLIC "-//Boost//DTD BoostBook XML V1.0//EN" "../../../../tools/boostbook/dtd/boostbook.dtd" [
<!ENTITY utf "<acronym>UTF</acronym>">
]>
<section id="utf.user-guide.initialization">
<title>Test module initialization &hellip; or ready, set &hellip;</title>
<titleabbrev>Test module initialization</titleabbrev>
<para role="first-line-indented">
There are two tasks that you may need to perform before actual testing can start:
</para>
<itemizedlist>
<listitem>
<simpara>
The test tree needs to be built (unless you are using automated test units registration).
</simpara>
</listitem>
<listitem>
<simpara>
Custom test module initialization needs to be performed. This includes
initialization of the code under test and custom tune-up of the &utf; parameters (for example the test log or the
test results report output streams redirection).
</simpara>
</listitem>
</itemizedlist>
<para role="first-line-indented">
The function dedicated for this purpose is called <firstterm>the test module initialization function</firstterm>. Alternatively you can
employ global fixtures, covered in detail, including the differences between the two approaches, in
<xref linkend="utf.user-guide.fixture"/>.
</para>
<para role="first-line-indented">
The &utf; requires you to implement the test module initialization function. The test runner supplied with the static
library or single-header variants of the &utf; requires the specific function specification. The test runner supplied
with the dynamic library variant of the &utf; requires the specific initialization function signature only. <!-- TO FIX: specific specification -->
</para>
<para role="first-line-indented">
For many <link linkend="test-module.def">test modules</link> you don't need to do any custom initialization
and test tree construction is automated. In this case you don't really need the initialization function and
the &utf; provides a way to automatically generate an empty one for you.
</para>
<para role="first-line-indented">
Original design of the &utf; supported the manual test tree construction only. Later versions introduced the
automated registration of test units. In later versions of the &utf; the original initialization function
specification became inconvenient and unnecessarily unsafe. So the alternative initialization function specification
was introduced. This change is not backward compatible. The test runners supplied with the static library and
single-header variants of the &utf; by default still require original initialization function specification, but
support <link linkend="utf.compilation.flags">compilation flags</link> that switch to the alternative one. The test
runner supplied with dynamic library variant of the &utf; requires new specification and doesn't support
original one. The plan is to deprecate the original initialization function specification in one of the future
releases and ultimately to stop supporting it.
</para>
<para role="first-line-indented">
The initialization function invocation is monitored by the &utf; the same way as all the test cases. An unexpected
exception or system error detected during initialization function invocation is treated as initialization error and
is reported as such.
</para>
<section id="utf.user-guide.initialization.orig-signature">
<title>Original initialization function signature and name</title>
<titleabbrev>Original initialization function</titleabbrev>
<para role="first-line-indented">
The original design of the &utf; initialization required you to implement the function with the following
specification:
</para>
<programlisting><classname>boost::unit_test::test_suite</classname>* init_unit_test_suite( int argc, char* argv[] );</programlisting>
<para role="first-line-indented">
This function was intended to initialize and return a master test suite. The null value was considered an initialization
error. The current design of the &utf; maintains master test suite instance internally and does not treat the null result
value as an initialization error. In fact it's recommended to always return a null value and register test units in the
master test suite using the regular test suite add interface. The only way to indicate an initialization error is to throw the
<classname>boost::unit_test::framework::setup_error</classname> exception.
</para>
<para role="first-line-indented">
The initialization function parameters argc, argv provide the command line arguments specified during test
module invocation. It's guaranteed that any framework-specific command line arguments are excluded. To be
consistent with the alternative initialization function specification it's recommended though to access the
command line arguments using the master test suite interface.
</para>
</section>
<section id="utf.user-guide.initialization.alt-signature">
<title>Alternative initialization function signature and name</title>
<titleabbrev>Alternative initialization function</titleabbrev>
<para role="first-line-indented">
The alternative design of the &utf; initialization requires you to implement a function with the following
specification:
</para>
<programlisting>bool init_unit_test();</programlisting>
<para role="first-line-indented">
The result value of this function indicates whether or not initialization was successful. To register test
units in a master test suite use the test suite add interface. To access command line arguments use the master
test suite interface. It's guaranteed that any framework-specific command line arguments are excluded.
</para>
</section>
<section id="utf.user-guide.initialization.signature-typedef">
<title>Initialization function signature access</title>
<para role="first-line-indented">
The test runner interface needs to refer to the initialization function signature. The &utf; provides the typedef
that resolves to proper signature in all configurations:
</para>
<programlisting>namespace boost {
namespace unit_test {
#ifdef BOOST_TEST_ALTERNATIVE_INIT_API
typedef bool (*init_unit_test_func)();
#else
typedef test_suite* (*init_unit_test_func)( int, char* [] );
#endif
}
}</programlisting>
</section>
<section id="utf.user-guide.initialization.auto-generation">
<title>Automated generation of the test module initialization function</title>
<titleabbrev>Automated generation</titleabbrev>
<para role="first-line-indented">
To automatically generate an empty test module initialization function you need to define
<xref linkend="utf.flag.main" endterm="utf.flag.main"/> before including the
<filename class="headerfile">boost/test/unit_test.hpp</filename> header. The value of this define is ignored.
Alternatively you can define the macro <xref linkend="utf.flag.module" endterm="utf.flag.module"/> to be equal to
any string (not necessarily in quotes). This macro causes the same result as
<xref linkend="utf.flag.main" endterm="utf.flag.main"/>, and in addition the macro value becomes the name of the
master test suite.
</para>
<important>
<simpara>
For a test module consisting of multiple source files you have to define these flags in a single test file only.
Otherwise you end up with multiple instances of the initialization function.
</simpara>
</important>
</section>
</section>

View File

@ -265,6 +265,24 @@ Leaving test suite "example"
</descr>
</refentry>
<refentry name="break_exec_path">
<name>Break execution path</name>
<env>BOOST_TEST_BREAK_EXEC_PATH</env>
<cla>break_exec_path</cla>
<vals>
<simplelist>
<member>string consisting of space separate test_name:execution_path_number pairs</member>
</simplelist>
</vals>
<descr>
<simpara>
this runtime parameter is used by the exception safety tester. By default the exception safety tester only reports the
index of the execution path and the test case name where a failure occurred. Using this parameter you can make the
tester break the execution right before entering this path.
</simpara>
</descr>
</refentry>
<refentry name="build_info">
<name>Print build info</name>
<env>BOOST_TEST_BUILD_INFO</env>
@ -301,25 +319,20 @@ Leaving test suite "example"
</descr>
</refentry>
<refentry name="detect_memory_leak">
<name>Detect memory leaks</name>
<env>BOOST_TEST_DETECT_MEMORY_LEAK</env>
<cla>detect_memory_leaks</cla>
<refentry name="color_output">
<name>Produce color output</name>
<env>BOOST_TEST_COLOR_OUTPUT</env>
<cla>color_output</cla>
<vals>
<simplelist>
<member>0</member>
<member><emphasis role="bold">1</emphasis></member>
<member>integer value &gt; 1</member>
<member><emphasis role="bold">no</emphasis></member>
<member>yes</member>
</simplelist>
</vals>
<descr>
<simpara>
positive value tells the framework to detect the memory leaks (if any). Any value greater then 1 in addition
is treated as leak allocation number and setup runtime breakpoint. In other words setting this parameter to
the positive value N greater than 1 causes the framework to set a breakpoint at Nth memory allocation (don't
do that from the command line - only when you are under debugger). Note: if your test program produce memory
leaks notifications, they are combined with allocation number values you could use to set a breakpoint.
Currently only applies to MS family of compilers.
The &utf; is able to produce color output on systems which support it. To enable this behavior set the parameter to
'yes'. By default the output produced is not colored.
</simpara>
</descr>
</refentry>
@ -340,7 +353,30 @@ Leaving test suite "example"
</simpara>
</descr>
</refentry>
<refentry name="detect_memory_leaks">
<name>Detect memory leaks</name>
<env>BOOST_TEST_DETECT_MEMORY_LEAK</env>
<cla>detect_memory_leaks</cla>
<vals>
<simplelist>
<member>0</member>
<member><emphasis role="bold">1</emphasis></member>
<member>integer value &gt; 1</member>
</simplelist>
</vals>
<descr>
<simpara>
positive value tells the framework to detect the memory leaks (if any). Any value greater than 1 in addition
is treated as a leak allocation number and sets up a runtime breakpoint. In other words setting this parameter to
the positive value N greater than 1 causes the framework to set a breakpoint at Nth memory allocation (don't
do that from the command line - only when you are under debugger). Note: if your test program produces memory
leak notifications, they are combined with allocation number values you could use to set a breakpoint.
Currently only applies to MS family of compilers.
</simpara>
</descr>
</refentry>
<refentry name="log_format">
<name>The log format</name>
<env>BOOST_TEST_LOG_FORMAT</env>
@ -419,6 +455,27 @@ Leaving test suite "example"
</descr>
</refentry>
<refentry name="log_sink">
<name>The log sink name</name>
<env>BOOST_TEST_LOG_SINK</env>
<cla>log_sink</cla>
<vals>
<simplelist>
<member><emphasis role="bold">stdout</emphasis></member>
<member>stderr</member>
<member>arbitrary file name</member>
</simplelist>
</vals>
<descr>
<simpara>
This parameter allows you to easily redirect the test log. The parameter value is the string containing either a file
name, in which case the &utf; will redirect log into file with that name, or 'stdout', in which case log is
redirected into standard output stream, or 'stderr' , in which case log is redirected into standard error stream.
Default is 'stdout'
</simpara>
</descr>
</refentry>
<refentry name="output_format">
<name>The output format</name>
<env>BOOST_TEST_OUTPUT_FORMAT</env>
@ -495,6 +552,27 @@ Leaving test suite "example"
</descr>
</refentry>
<refentry name="report_sink">
<name>The report sink name</name>
<env>BOOST_TEST_REPORT_SINK</env>
<cla>report_sink</cla>
<vals>
<simplelist>
<member><emphasis role="bold">stderr</emphasis></member>
<member>stdout</member>
<member>arbitrary file name</member>
</simplelist>
</vals>
<descr>
<simpara>
This parameter allows you to easily redirect the test results report. The parameter value is the string containing either
a file name, in which case the &utf; will redirect results report into file with that name, or 'stdout', in which case
report is redirected into standard output stream, or 'stderr', in which case report is redirected into standard error
stream. Default is 'stderr'.
</simpara>
</descr>
</refentry>
<refentry name="result_code">
<name>[Do not] return result code</name>
<env>BOOST_TEST_RESULT_CODE</env>
@ -532,6 +610,26 @@ Leaving test suite "example"
</descr>
</refentry>
<refentry name="save_pattern">
<name>Save pattern</name>
<env>BOOST_TEST_SAVE_PATTERN</env>
<cla>save_pattern</cla>
<vals>
<simplelist>
<member><emphasis role="bold">no</emphasis></member>
<member>yes</member>
</simplelist>
</vals>
<descr>
<simpara>
this parameter serves no particular purpose within the framework itself. It can be used by test modules relying
on output_test_stream to implement testing logic. output_test_stream has two modes of operation: save the pattern
file and match against stored pattern. You can use this parameter to switch between these modes, by passing the
parameter value to the output_test_stream constructor.
</simpara>
</descr>
</refentry>
<refentry name="show_progress">
<name>Show progress</name>
<env>BOOST_TEST_SHOW_PROGRESS</env>

View File

@ -113,16 +113,47 @@
<itemizedlist>
<listitem>
<simpara>
<link linkend="utf.user-guide.test-organization.manual-nullary-test-case">Manually registered test case</link>
<link linkend="utf.user-guide.test-organization.auto-nullary-test-case">Test case with automated registration</link>
</simpara>
</listitem>
<listitem>
<simpara>
<link linkend="utf.user-guide.test-organization.auto-nullary-test-case">Test case with automated registration</link>
<link linkend="utf.user-guide.test-organization.manual-nullary-test-case">Manually registered test case</link>
</simpara>
</listitem>
</itemizedlist>
<section id="utf.user-guide.test-organization.auto-nullary-test-case">
<title>Nullary function based test case with automated registration</title>
<titleabbrev>Automated registration</titleabbrev>
<para role="first-line-indented">
To create a nullary free function based test case, which is registered in place of implementation, employ the
macro BOOST_AUTO_TEST_CASE.
</para>
<inline-synopsis>
<macro name="BOOST_AUTO_TEST_CASE" kind="functionlike">
<macro-parameter name="test_case_name"/>
</macro>
</inline-synopsis>
<para role="first-line-indented">
The macro is designed to closely mimic nullary free function syntax. Changes that are required to make an
existing test case, implemented as a free function, registered in place are illustrated in the following
example (compare with <xref linkend="utf.user-guide.test-organization.manual-nullary-test-case.example01"/>):
</para>
<btl-example name="example06">
<title>Nullary function based test case with automated registration</title>
</btl-example>
<para role="first-line-indented">
With this macro you don't need to implement the initialization function at all. The macro creates and
registers the test case with the name free_test_function automatically.
</para>
</section>
<section id="utf.user-guide.test-organization.manual-nullary-test-case">
<title>Manually registered nullary function based test case</title>
<titleabbrev>Manual registration</titleabbrev>
@ -212,37 +243,6 @@
<xref linkend="utf.user-guide.test-organization.unary-test-case"/>.
</para>
</section>
<section id="utf.user-guide.test-organization.auto-nullary-test-case">
<title>Nullary function based test case with automated registration</title>
<titleabbrev>Automated registration</titleabbrev>
<para role="first-line-indented">
To create a nullary free function cased test case, which is registered in place of implementation, employ the
macro BOOST_AUTO_TEST_CASE.
</para>
<inline-synopsis>
<macro name="BOOST_AUTO_TEST_CASE" kind="functionlike">
<macro-parameter name="test_case_name"/>
</macro>
</inline-synopsis>
<para role="first-line-indented">
The macro is designed to closely mimic nullary free function syntax. Changes that are required to make an
existing test case, implemented as a free function, registered in place are illustrated in the following
example (compare with <xref linkend="utf.user-guide.test-organization.manual-nullary-test-case.example01"/>):
</para>
<btl-example name="example06">
<title>Nullary function based test case with automated registration</title>
</btl-example>
<para role="first-line-indented">
With this macro you don't need to implement the initialization function at all. The macro creates and
registers the test case with the name free_test_function automatically.
</para>
</section>
</section>
<section id="utf.user-guide.test-organization.unary-test-case">
<title>Unary function based test case</title>
@ -329,20 +329,76 @@
</para>
<itemizedlist>
<listitem>
<simpara>
<link linkend="utf.user-guide.test-organization.manual-test-case-template">Manually registered test case
template</link>
</simpara>
</listitem>
<listitem>
<simpara>
<link linkend="utf.user-guide.test-organization.auto-test-case-template">Test case template with automated
registration</link>
</simpara>
</listitem>
<listitem>
<simpara>
<link linkend="utf.user-guide.test-organization.manual-test-case-template">Manually registered test case
template</link>
</simpara>
</listitem>
</itemizedlist>
<section id="utf.user-guide.test-organization.auto-test-case-template">
<title>Test case template with automated registration</title>
<titleabbrev>Automated registration</titleabbrev>
<para role="first-line-indented">
To create a test case template registered in place of implementation, employ the macro
BOOST_AUTO_TEST_CASE_TEMPLATE. This facility is also called <firstterm>auto test case template</firstterm>.
</para>
<inline-synopsis>
<macro name="BOOST_AUTO_TEST_CASE_TEMPLATE" kind="functionlike">
<macro-parameter name="test_case_name"/>
<macro-parameter name="formal_type_parameter_name"/>
<macro-parameter name="collection_of_types"/>
</macro>
</inline-synopsis>
<para role="first-line-indented">
The macro BOOST_AUTO_TEST_CASE_TEMPLATE requires three arguments:
</para>
<variablelist>
<?dbhtml list-presentation="list"?>
<?dbhtml term-width="60%" list-width="100%"?>
<?dbhtml term-separator=" - "?> <!-- TO FIX: where separator? -->
<varlistentry>
<term>The test case template name</term>
<listitem>
<simpara>
unique test case template identifier
</simpara>
</listitem>
</varlistentry>
<varlistentry>
<term>The name of a formal template parameter</term>
<listitem>
<simpara>
name of the type the test case template is instantiated with
</simpara>
</listitem>
</varlistentry>
<varlistentry>
<term>The collection of types to instantiate test case template with</term>
<listitem>
<simpara>
arbitrary MPL sequence
</simpara>
</listitem>
</varlistentry>
</variablelist>
<btl-example name="example10">
<title>Test case template with automated registration</title>
</btl-example>
</section>
<section id="utf.user-guide.test-organization.manual-test-case-template">
<title>Manually registered test case template</title>
<titleabbrev>Manual registration</titleabbrev>
@ -438,62 +494,6 @@
<title>Manually registered test case template</title>
</btl-example>
</section>
<section id="utf.user-guide.test-organization.auto-test-case-template">
<title>Test case template with automated registration</title>
<titleabbrev>Automated registration</titleabbrev>
<para role="first-line-indented">
To create a test case template registered in place of implementation, employ the macro
BOOST_AUTO_TEST_CASE_TEMPLATE. This facility is also called <firstterm>auto test case template</firstterm>.
</para>
<inline-synopsis>
<macro name="BOOST_AUTO_TEST_CASE_TEMPLATE" kind="functionlike">
<macro-parameter name="test_case_name"/>
<macro-parameter name="formal_type_parameter_name"/>
<macro-parameter name="collection_of_types"/>
</macro>
</inline-synopsis>
<para role="first-line-indented">
The macro BOOST_AUTO_TEST_CASE_TEMPLATE requires three arguments:
</para>
<variablelist>
<?dbhtml list-presentation="list"?>
<?dbhtml term-width="60%" list-width="100%"?>
<?dbhtml term-separator=" - "?> <!-- TO FIX: where separator? -->
<varlistentry>
<term>The test case template name</term>
<listitem>
<simpara>
unique test case template identifier
</simpara>
</listitem>
</varlistentry>
<varlistentry>
<term>The name of a formal template parameter</term>
<listitem>
<simpara>
name of the type the test case template is instantiated with
</simpara>
</listitem>
</varlistentry>
<varlistentry>
<term>The collection of types to instantiate test case template with</term>
<listitem>
<simpara>
arbitrary MPL sequence
</simpara>
</listitem>
</varlistentry>
</variablelist>
<btl-example name="example10">
<title>Test case template with automated registration</title>
</btl-example>
</section>
</section>
<section id="utf.user-guide.test-organization.test-suite">
@ -510,99 +510,22 @@
<itemizedlist>
<listitem>
<simpara>
<link linkend="utf.user-guide.test-organization.manual-test-suite">Manually registered test suite</link>
<link linkend="utf.user-guide.test-organization.auto-test-suite">Test suite with automated registration</link>
</simpara>
</listitem>
<listitem>
<simpara>
<link linkend="utf.user-guide.test-organization.auto-test-suite">Test suite with automated registration</link>
<link linkend="utf.user-guide.test-organization.manual-test-suite">Manually registered test suite</link>
</simpara>
</listitem>
</itemizedlist>
<section id="utf.user-guide.test-organization.test-suite-registration-interface">
<title>Test unit registration interface</title>
<para role="first-line-indented">
The &utf; models the notion of test case container - test suite - using class boost::unit_test::test_suite. For
complete class interface reference check advanced section of this documentation. Here you should only be
interested in a single test unit registration interface:
</para>
<programlisting>void test_suite::add( test_unit* tc, counter_t expected_failures = 0, int timeout = 0 );</programlisting>
<para role="first-line-indented">
The first parameter is a pointer to a newly created test unit. The second optional parameter -
expected_failures - defines the number of test assertions that are expected to fail within the test unit. By
default no errors are expected.
</para>
<caution>
<simpara>
Be careful when supplying a number of expected failures for test suites. By default the &utf; calculates the
number of expected failures in test suite as the sum of appropriate values in all test units that constitute
it. And it rarely makes sense to change this.
</simpara>
</caution>
<para role="first-line-indented">
The third optional parameter - timeout - defines the timeout value for the test unit. As of now the &utf;
isn't able to set a timeout for the test suite execution, so this parameter makes sense only for test case
registration. By default no timeout is set. See the method
<methodname>boost::execution_monitor::execute</methodname> for more details about the timeout value.
</para>
<para role="first-line-indented">
To register group of test units in one function call the boost::unit_test::test_suite provides another add
interface covered in the advanced section of this documentation.
</para>
</section>
<section id="utf.user-guide.test-organization.manual-test-suite">
<title>Manually registered test suites</title>
<titleabbrev>Manual registration</titleabbrev>
<para role="first-line-indented">
To create a test suite manually, employ the macro BOOST_TEST_SUITE:
</para>
<inline-synopsis>
<macro name="BOOST_TEST_SUITE" kind="functionlike">
<macro-parameter name="test_suite_name"/>
</macro>
</inline-synopsis>
<para role="first-line-indented">
BOOST_TEST_SUITE creates an instance of the class boost::unit_test::test_suite and returns a pointer to the
constructed instance. Alternatively you can create an instance of class boost::unit_test::test_suite yourself.
</para>
<note>
<simpara>
boost::unit_test::test_suite instances have to be allocated on the heap and the compiler won't allow you
to create one on the stack.
</simpara>
</note>
<para role="first-line-indented">
Newly created test suite has to be registered in a parent one using add interface. Both test suite creation and
registration is performed in the test module initialization function.
</para>
<btl-example name="example11">
<title>Manually registered test suites</title>
</btl-example>
<para role="first-line-indented">
This example creates a test tree, which can be represented by the following hierarchy:
</para>
<mediaobject>
<imageobject>
<imagedata format="jpg" fileref="../img/class-hier.jpg"/>
</imageobject>
</mediaobject>
</section>
<para role="first-line-indented">
In addition the &utf; presents a notion of
<link linkend="utf.user-guide.test-organization.master-test-suite">Master Test Suite</link>. The most important
reason to learn about this component is that it provides an ability to access command line arguments supplied
to a test module.
</para>
<section id="utf.user-guide.test-organization.auto-test-suite">
<title>Test suites with automated registration</title>
@ -656,6 +579,101 @@
</btl-example>
</section>
<section id="utf.user-guide.test-organization.manual-test-suite">
<title>Manually registered test suites</title>
<titleabbrev>Manual registration</titleabbrev>
<para role="first-line-indented">
To create a test suite manually you need to create an instance of boost::unit_test::test_suite class, register
it in test tree and populate it with test cases (or lower level test suites).
</para>
<section id="utf.user-guide.test-organization.test-suite-registration-interface">
<title>Test unit registration interface</title>
<para role="first-line-indented">
The &utf; models the notion of test case container - test suite - using class boost::unit_test::test_suite. For
complete class interface reference check advanced section of this documentation. Here you should only be
interested in a single test unit registration interface:
</para>
<programlisting>void test_suite::add( test_unit* tc, counter_t expected_failures = 0, int timeout = 0 );</programlisting>
<para role="first-line-indented">
The first parameter is a pointer to a newly created test unit. The second optional parameter -
expected_failures - defines the number of test assertions that are expected to fail within the test unit. By
default no errors are expected.
</para>
<caution>
<simpara>
Be careful when supplying a number of expected failures for test suites. By default the &utf; calculates the
number of expected failures in test suite as the sum of appropriate values in all test units that constitute
it. And it rarely makes sense to change this.
</simpara>
</caution>
<para role="first-line-indented">
The third optional parameter - timeout - defines the timeout value for the test unit. As of now the &utf;
isn't able to set a timeout for the test suite execution, so this parameter makes sense only for test case
registration. By default no timeout is set. See the method
<methodname>boost::execution_monitor::execute</methodname> for more details about the timeout value.
</para>
<para role="first-line-indented">
To register group of test units in one function call the boost::unit_test::test_suite provides another add
interface covered in the advanced section of this documentation.
</para>
</section>
<section id="utf.user-guide.test-organization.test-suite-instance-construction">
<title>Test suite instance construction</title>
<para role="first-line-indented">
To create a test suite instance manually, employ the macro BOOST_TEST_SUITE. It hides all implementation
details and you are only required to specify the test suite name:
</para>
<inline-synopsis>
<macro name="BOOST_TEST_SUITE" kind="functionlike">
<macro-parameter name="test_suite_name"/>
</macro>
</inline-synopsis>
<para role="first-line-indented">
BOOST_TEST_SUITE creates an instance of the class boost::unit_test::test_suite and returns a pointer to the
constructed instance. Alternatively you can create an instance of class boost::unit_test::test_suite yourself.
</para>
<note>
<simpara>
boost::unit_test::test_suite instances have to be allocated on the heap and the compiler won't allow you
to create one on the stack.
</simpara>
</note>
<para role="first-line-indented">
Newly created test suite has to be registered in a parent one using add interface. Both test suite creation and
registration is performed in the test module initialization function.
</para>
</section>
<btl-example name="example11">
<title>Manually registered test suites</title>
</btl-example>
<para role="first-line-indented">
This example creates a test tree, which can be represented by the following hierarchy:
</para>
<mediaobject>
<imageobject>
<imagedata format="jpg" fileref="../img/class-hier.jpg"/>
</imageobject>
</mediaobject>
</section>
<section id="utf.user-guide.test-organization.master-test-suite">
<title>Master Test Suite</title>

View File

@ -16,7 +16,7 @@
<simpara>All test errors are reported uniformly</simpara>
<simpara>
The test execution monitor along with standardized output from all included
<link linkend="utf.testing-tools">testing tools</link> provides uniform reporting for all errors including fatal
<link linkend="utf.user-guide.testing-tools">testing tools</link> provides uniform reporting for all errors including fatal
errors, like memory access violations and uncaught exceptions.
</simpara>
</listitem>
@ -175,10 +175,10 @@
<title>Logging tool arguments</title>
<para role="first-line-indented">
Most of the <link linkend="utf.testing-tools">testing tools</link> print values of their arguments to the output
Most of the <link linkend="utf.user-guide.testing-tools">testing tools</link> print values of their arguments to the output
stream in some form of log statement. If arguments type does not support <code>operator&lt;&lt;(std::ostream&amp;,
ArgumentType const&amp;)</code> interface you will get a compilation error. You can either implement above
interface or prohibit the <link linkend="utf.testing-tools">testing tools</link> from logging argument values for
interface or prohibit the <link linkend="utf.user-guide.testing-tools">testing tools</link> from logging argument values for
specified type. To do so use following statement on file level before first test case that includes statement
failing to compile:
</para>
@ -473,7 +473,7 @@ Boost : $BOOST_VERSION</literallayout></computeroutput>
</segmentedlist>
<para role="first-line-indented">
Advanced <link linkend="utf.testing-tools">testing tools</link> may produce more complicated error messages.
Advanced <link linkend="utf.user-guide.testing-tools">testing tools</link> may produce more complicated error messages.
</para>
</section>

View File

@ -0,0 +1,155 @@
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE chapter PUBLIC "-//Boost//DTD BoostBook XML V1.0//EN" "../../../../tools/boostbook/dtd/boostbook.dtd" [
<!ENTITY utf "<acronym>UTF</acronym>">
]>
<section id="utf.user-guide.test-runners">
<title>The supplied test runners &hellip; or where is the entrance?</title>
<titleabbrev>Supplied test runners</titleabbrev>
<para role="first-line-indented">
All usage variants of the &utf;, excluding the
<link linkend="utf.user-guide.external-test-runner">external test runner</link>, supply the test runner in a form of
free function named unit_test_main with the following signature:
</para>
<programlisting>int unit_test_main( init_unit_test_func init_func, int argc, char* argv[] );</programlisting>
<para role="first-line-indented">
To invoke the test runner you are required to supply the pointer to the <link linkend="test-module.def">test module</link>
initialization function as the first argument to the test runner function. In the majority of cases this function is
invoked directly from test executable entry point - function main(). In most usage variants the &utf; can
automatically generate default function main() implementation as either part of the library or test module itself.
Since the function main needs to refer to the initialization function by name, it is predefined by the default
implementation and you are required to match both specific signature and name, when implementing initialization
function. If you for any reason prefer more flexibility you can opt to implement the function main() yourself, in
which case it's going to be your responsibility to invoke the test runner, but the initialization function name is
not enforced by the &utf;. See below for the flags that need to be defined/undefined in each usage variant to enable this.
</para>
<warning>
<simpara>
In spite of the syntactic similarity, the signatures of the test runner function are in fact different for different usage
variants. The cause is different signature of the test module initialization function referred by the
<link linkend="utf.user-guide.initialization.signature-typedef">typedef init_unit_test_func</link>. This makes static
and dynamic library usage variants incompatible and they can't be easily switched on the fly.
</simpara>
</warning>
<section id="utf.user-guide.static-lib-runner">
<title>Static library variant of the &utf;</title>
<titleabbrev>Static library</titleabbrev>
<para role="first-line-indented">
By default this variant supplies the function main() as part of static library. If this is for any reason undesirable
you need to define the flag <xref linkend="utf.flag.no-main" endterm="utf.flag.no-main"/> during the library
compilation and the function main() implementation won't be generated.
</para>
<para role="first-line-indented">
In addition to the <link linkend="utf.user-guide.static-lib-variant">initialization function signature requirement</link>
default function main() implementation assumes the name of initialization function is init_unit_test_suite
</para>
</section>
<section id="utf.user-guide.dynamic-lib-runner">
<title>Dynamic library variant of the &utf;</title>
<titleabbrev>Dynamic library</titleabbrev>
<para role="first-line-indented">
Unlike the static library variant function main() can't reside in the dynamic library body. Instead this variant
supplies default function main() implementation as part of the header
<filename class="headerfile">boost/test/unit_test.hpp</filename> to be generated as part of your test file body.
The function main() is generated only if either the <xref linkend="utf.flag.main" endterm="utf.flag.main"/> or
the <xref linkend="utf.flag.module" endterm="utf.flag.module"/> flags are defined during a test module compilation.
For <link linkend="single-file-test-module.def">single-file test module</link> flags can be defined either in a
test module's makefile or before the header <filename class="headerfile">boost/test/unit_test.hpp</filename>
inclusion. For a <xref linkend="multi-file-test-module.def" endterm="multi-file-test-module.def"/> flags can't
be defined in makefile and have to be defined in only one of the test files to avoid duplicate copies of the
function main().
</para>
<important>
<simpara>
The same flags also govern generation of an empty
<link linkend="utf.user-guide.initialization">test module initialization function</link>. This means that if you
need to implement either function main() or initialization function manually, you can't define the above flags
and are required to manually implement both of them.
</simpara>
</important>
</section>
<section id="utf.user-guide.single-header-runner">
<title>Single-header variant of the &utf;</title>
<titleabbrev>Single header</titleabbrev>
<para role="first-line-indented">
By default this variant supplies function main() as part of the header
<filename class="headerfile">boost/test/included/unit_test.hpp</filename> to be generated as part of your test file
body. If this is for any reason undesirable you need to define the flag
<xref linkend="utf.flag.no-main" endterm="utf.flag.no-main"/> during test module compilation and the function main()
implementation won't be generated.
</para>
</section>
<section id="utf.user-guide.external-test-runner">
<title>External test runner variant of the &utf;</title>
<titleabbrev>External test runner</titleabbrev>
<para role="first-line-indented">
The external test runner variant of the &utf; supplies the test runner in a form of standalone utility
boost_test_runner. You are free to implement different, more advanced, test runners that can be used with this
variant.
</para>
<simpara>
<!-- TO FIX -->
</simpara>
</section>
<section id="utf.user-guide.runners-exit-status">
<title>Generated exit status values</title>
<para role="first-line-indented">
Once testing is finished, all supplied test runners report the results and return an exit status value. Here is
a summary of all possible generated values:
</para>
<table id="utf.user-guide.runners-exit-status-summary">
<title>Generated exit status values</title>
<tgroup cols="2">
<colspec colname="c1"/>
<colspec colname="c2"/>
<thead>
<row>
<entry>Value</entry>
<entry>Meaning</entry>
</row>
</thead>
<tbody>
<row>
<entry>boost::exit_success</entry>
<entry>
No errors occurred during the test or the success result code was explicitly requested with the no_result_code
parameter.
</entry>
</row>
<row>
<entry>boost::exit_test_failure</entry>
<entry>
Non-fatal errors detected and no uncaught exceptions were thrown during testing or the &utf; fails during
initialization.
</entry>
</row>
<row>
<entry>boost::exit_exception_failure</entry>
<entry>
Fatal errors were detected or uncaught exceptions thrown during testing.
</entry>
</row>
</tbody>
</tgroup>
</table>
</section>
</section>

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,158 @@
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE chapter PUBLIC "-//Boost//DTD BoostBook XML V1.0//EN" "../../../../tools/boostbook/dtd/boostbook.dtd" [
<!ENTITY utf "<acronym>UTF</acronym>">
]>
<section id="utf.user-guide.usage-variants">
<title>The &utf; usage variants &hellip; or the <ulink url="http://en.wikipedia.org/wiki/Buridan's_ass">Buridan's donkey</ulink> parable</title>
<titleabbrev>Usage variants</titleabbrev>
<para role="first-line-indented">
The &utf; presents you with 4 different variants of how it can be used.
</para>
<itemizedlist>
<listitem>
<simpara><link linkend="utf.user-guide.static-lib-variant">The static library variant</link></simpara>
</listitem>
<listitem>
<simpara><link linkend="utf.user-guide.dynamic-lib-variant">The dynamic library variant</link></simpara>
</listitem>
<listitem>
<simpara><link linkend="utf.user-guide.single-header-variant">The single-header variant</link></simpara>
</listitem>
<listitem>
<simpara><link linkend="utf.user-guide.extern-test-runner-variant">The external test runner variant</link></simpara>
</listitem>
</itemizedlist>
<para role="first-line-indented">
Unlike the Buridan's donkey though, you shouldn't have problems deciding which one to use, since there are
clear reasons why you would prefer each one.
</para>
<para role="first-line-indented">
In most cases to compile a test module based on the &utf; all you need to include is just the single header
<filename class="headerfile">boost/test/unit_test.hpp</filename>. This header includes internally most of the other
headers that contains the &utf; definitions. Some advanced features, like the floating point comparison or the
logged expectations testing, are defined in independent headers and need to be included explicitly.
</para>
<section id="utf.user-guide.static-lib-variant">
<title>The static library variant of the &utf;</title><titleabbrev>Static library</titleabbrev>
<para role="first-line-indented">
The &utf; can be built into a static library. If you opt to link a test module with the
<link linkend="utf.compilation.standalone">standalone static library</link>, this usage is called the static library
variant of the &utf;.
</para>
<para role="first-line-indented">
The test runner supplied with this variant requires you to implement the <link linkend="test-module.def">test
module</link> initialization function that matches one of the two specifications depending on the compilation flag
<xref linkend="utf.flag.alt-init-api" endterm="utf.flag.alt-init-api"/>. If flag isn't defined you are required
to match the original specification. If you define the flag <xref linkend="utf.flag.alt-init-api"
endterm="utf.flag.alt-init-api"/> during a test module compilation you are required to use the alternative
initialization function specification. The &utf; provides an ability to
<link linkend="utf.user-guide.initialization.auto-generation">automatically generate</link> an empty test module
initialization function with correct specification if no custom initialization is required by a test module.
</para>
<important>
<simpara>
If you opted to use an alternative initialization API, for a test module to be able to link with prebuilt library,
the flag <xref linkend="utf.flag.alt-init-api" endterm="utf.flag.alt-init-api"/> has to be defined both during
library and a test module compilation.
</simpara>
</important>
</section>
<section id="utf.user-guide.dynamic-lib-variant">
<title>The dynamic library variant of the &utf;</title>
<titleabbrev>Dynamic library</titleabbrev>
<para role="first-line-indented">
In the project with large number of test modules <link linkend="utf.user-guide.dynamic-lib-variant">the static
library</link> variant of the &utf; may cause you to waste a lot of disk space, since the &utf; is linked
statically with every test module. The solution is to link with the &utf; built into a dynamic library. If you opt
to link a test module with the prebuilt dynamic library, this usage is called the dynamic library variant of the
&utf;. This variant requires you to define the flag <xref linkend="utf.flag.dyn-link" endterm="utf.flag.dyn-link"/>
either in a makefile or before the header <filename class="headerfile">boost/test/unit_test.hpp</filename>
inclusion.
</para>
<para role="first-line-indented">
The test runner supplied with this variant requires you to implement the <link linkend="test-module.def">test
module</link> initialization function that matches the alternative initialization function signature. The &utf;
provides an ability to <link linkend="utf.user-guide.initialization.auto-generation">automatically generate</link>
an empty test module initialization function with correct signature if no custom initialization is required by a
test module.
</para>
<note>
<simpara>
The name of the test module initialization function is not enforced, since the function is passed as an argument
to the test runner.
</simpara>
</note>
</section>
<section id="utf.user-guide.single-header-variant">
<title>The single-header variant of the &utf;</title>
<titleabbrev>Single header</titleabbrev>
<para role="first-line-indented">
If you prefer to avoid the <link linkend="utf.compilation.standalone">standalone library compilation</link>, you
should use the single-header variant of the &utf;. This variant is implemented, as it follows from its name, in
the single header <filename class="headerfile">boost/test/included/unit_test.hpp</filename>. An inclusion of
the header causes the complete implementation of the &utf; to be included as a part of a test module's
source file. The header <filename class="headerfile">boost/test/unit_test.hpp</filename> doesn't have to be
included anymore. You don't have to worry about disabling <link linkend="utf.compilation.auto-linking">
auto-linking</link> feature either. It's done in the implementation header already. This variant
can't be used with the <xref linkend="multi-file-test-module.def" endterm="multi-file-test-module.def"/>.
Otherwise it's almost identical from the usage prospective to the static library variant of the &utf;.
In fact the only difference is the name of the include file:
<filename class="headerfile">boost/test/included/unit_test.hpp</filename> instead of
<filename class="headerfile">boost/test/unit_test.hpp</filename>.
</para>
<para role="first-line-indented">
The test runner supplied with this variant requires you to implement the <link linkend="test-module.def">test
module</link> initialization function that matches one of the two specifications depending on the compilation flag
<xref linkend="utf.flag.alt-init-api" endterm="utf.flag.alt-init-api"/>. If flag isn't defined you are required to
match the original specification. If you define the flag
<xref linkend="utf.flag.alt-init-api" endterm="utf.flag.alt-init-api"/> during a test module compilation you are
required to use the alternative initialization function specification. The &utf; provides an ability to
<link linkend="utf.user-guide.initialization.auto-generation">automatically generate</link> an empty test module
initialization function with correct specification if no custom initialization is required by a test module.
</para>
</section>
<section id="utf.user-guide.extern-test-runner-variant">
<title>The external test runner variant of the &utf;</title>
<titleabbrev>External test runner</titleabbrev>
<para role="first-line-indented">
All other usage variants employ the build-in test runners. If you plan to use an external test runner with your
test module you need to build it as a dynamic library. This usage of the &utf; is called the external test runner
variant of the &utf;. The variant requires you to define the flag
<xref linkend="utf.flag.dyn-link" endterm="utf.flag.dyn-link"/> either in a makefile or before the header
<filename class="headerfile">boost/test/unit_test.hpp</filename> inclusion. An external test runner utility is
required to link with the dynamic library.
</para>
<para role="first-line-indented">
If an external test runner is based on the test runner built in to the dynamic library (like the standalone
boost_test_runner utility supplied by the &utf;), it requires you to implement the <link linkend="test-module.def">
test module</link> initialization function that matches the alternative initialization function signature. The
&utf; provides an ability to <link linkend="utf.user-guide.initialization.auto-generation">automatically generate
</link> an empty test module initialization function with correct signature if no custom initialization is required
by a test module.
</para>
<note>
<simpara>
An advanced test runner doesn't have to be based on the build-in one and may require a different
test module initialization function signature and/or name.
</simpara>
</note>
</section>
</section>

View File

@ -0,0 +1,68 @@
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE chapter PUBLIC "-//Boost//DTD BoostBook XML V1.0//EN" "../../../../tools/boostbook/dtd/boostbook.dtd" [
<!ENTITY utf "<acronym>UTF</acronym>">
]>
<section id="utf.user-guide" last-revision="$Date$">
<title>Unit Test Framework: User's guide</title><titleabbrev>User's guide</titleabbrev>
<section id="utf.user-guide.intro">
<title>Introduction &hellip; or where to start?</title><titleabbrev>Introduction</titleabbrev>
<para role="first-line-indented">
Without further ado, I'd like to start &hellip; but where? It's not obvious what is the best order to describe the framework.
One can use a bottom-up approach, starting with the basics and going up to cover real interfaces based on them. The downside is
that you'll have to dig through multiple pages of information you may never need in real life. One can follow the order
of test program execution. From test initialization to test tree construction to the report and log generation. This also
unfortunately may not be the clearest way. The Boost.Test &utf; is very flexible and a lot of details of various test initialization
options may not necessarily be important for the average user, while understanding test output is.
</para>
<para role="first-line-indented">
Well &hellip; This is a User's Guide after all. Let's go by what <emphasis role="bold">you</emphasis> need to know to successfully use the &utf;. Thus I follow
the order of decisions you as a user have to make and order of complexity of the problems you have to solve. If you find yourself
faced with some unclear term, feel free to jump directly to the <link linkend="utf.user-guide.glossary">Glossary</link> section,
where I collect short definitions for all used terms. And again, if you want to jump right into coding the
<link linkend="utf.tutorials">Tutorials</link> section would be a better place to start.
</para>
<para role="first-line-indented">
The &utf; has several usage variants. And the first decision you have to make is which one to use. These variants are covered in
section dedicated to <link linkend="utf.user-guide.usage-variants">Usage variants</link>. The next step, probably the most important
for you, is to start writing test cases, bind them in test suites and implement your checks. The first two topics are covered in
<link linkend="utf.user-guide.test-organization">Test organization</link> section, while
<link linkend="utf.user-guide.testing-tools">Testing tools</link> section arms you with rich set of tools enough to implement
almost arbitrary check you need.
</para>
<para role="first-line-indented">
Next you'll learn how to understand and manipulate the &utf; output in a <link linkend="utf.user-guide.test-output">Test output</link>
section. At that point you should be able to build and run most simple test modules and almost inevitably find a need to configure
how the test module is executed. Whether you want to change the output format, select which test cases to run, or run test cases in random order,
these and many other runtime configuration parameters are described in <link linkend="utf.user-guide.runtime-config">Runtime configuration</link>
section.
</para>
<para role="first-line-indented">
One of the first non-trivial things you might want to add to your test module is a test fixture. Fixture support is covered in
<link linkend="utf.user-guide.fixture">Test fixture</link> section. Usually the default test module initialization will work just fine,
but if you want to implement some custom initialization or change how default initialization behaves you need to first look in
<link linkend="utf.user-guide.initialization">Test module initialization</link> section. Here you'll learn about various options the &utf;
provides for you to customize this behavior.
</para>
<para role="first-line-indented">
Finally you might want to learn about how the &utf; implements entry points into the test modules. This is especially important if you
intend to implement main function yourself (and not use the main function provided by the &utf;). The
<link linkend="utf.user-guide.test-runners">Test runners</link> section covers this subject. Different usage variants employ slightly
different approaches to implementing test module entry points and present slightly different interfaces. This section is intended for advanced
users; some of the details of the implementation are described there.
</para>
</section>
<xi:include href="utf.user-guide.usage-variants.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="utf.user-guide.test-organization.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="utf.user-guide.testing-tools.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="utf.user-guide.test-output.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="utf.user-guide.runtime-config.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="utf.user-guide.fixture.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="utf.user-guide.initialization.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="utf.user-guide.test-runners.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="utf.user-guide.glossary.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
</section>

View File

@ -1,698 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE chapter PUBLIC "-//Boost//DTD BoostBook XML V1.0//EN" "../../../../tools/boostbook/dtd/boostbook.dtd" [
<!ENTITY utf "<acronym>UTF</acronym>">
]>
<section id="utf.user-guide" last-revision="$Date$">
<title>Unit Test Framework: User's guide</title><titleabbrev>User's guide</titleabbrev>
<section id="utf.user-guide.intro">
<title>Introduction &hellip; or what's your name?</title><titleabbrev>Introduction</titleabbrev>
<para role="first-line-indented">
Without further ado, let's define terms regularly used by the &utf;.
</para>
<variablelist>
<?dbhtml term-width="16%" list-width="100%"?>
<?dbhtml term-separator=":"?>
<?dbhtml table-summary="utf terms definition"?>
<varlistentry id="test-module.def">
<term><firstterm>The test module</firstterm></term>
<listitem>
<simpara>
This is a single binary that performs the test. Physically a test module consists of one or more test source files,
which can be built into an executable or a dynamic library. A test module that consists of a single test source
file is called <firstterm id="single-file-test-module.def">single-file test module</firstterm>. Otherwise
it's called <firstterm id="multi-file-test-module.def">multi-file test module</firstterm>. Logically a test
module consists of four parts: <link linkend="test-setup.def">test setup</link> (or test initialization),
<link linkend="test-body.def">test body</link>, <link linkend="test-cleanup.def">test cleanup</link> and
<link linkend="test-runner.def">test runner</link>. The test runner part is optional. If a test module is built as
an executable the test runner is built-in. If a test module is built as a dynamic library, it is run by an
external test runner.
</simpara>
</listitem>
</varlistentry>
<varlistentry id="test-body.def">
<term><firstterm>The test body</firstterm></term>
<listitem>
<simpara>
This is the part of a test module that actually performs the test.
Logically test body is a collection of <link linkend="test-assertion.def">test assertions</link> wrapped in
<link linkend="test-case.def">test cases</link>, which are organized in a <link linkend="test-tree.def">test tree
</link>.
</simpara>
</listitem>
</varlistentry>
<varlistentry id="test-tree.def">
<term><firstterm>The test tree</firstterm></term>
<listitem>
<simpara>
This is a hierarchical structure of <link linkend="test-suite.def">test suites</link> (non-leaf nodes) and
<link linkend="test-case.def">test cases</link> (leaf nodes).
</simpara>
</listitem>
</varlistentry>
<varlistentry id="test-unit.def">
<term><firstterm>The test unit</firstterm></term>
<listitem>
<simpara>
This is a collective name when referred to either <link linkend="test-suite.def">test suite</link> or
<link linkend="test-case.def">test case</link>
</simpara>
</listitem>
</varlistentry>
<varlistentry id="test-assertion.def">
<term><firstterm>Test assertion</firstterm></term>
<listitem>
<simpara>
This is a single binary condition (binary in a sense that is has two outcomes: pass and fail) checked
by a test module.
</simpara>
<simpara>
There are different schools of thought on how many test assertions a test case should consist of. Two polar
positions are the one advocated by TDD followers - one assertion per test case; and opposite of this - all test
assertions within single test case - advocated by those only interested in the first error in a
test module. The &utf; supports both approaches.
</simpara>
</listitem>
</varlistentry>
<varlistentry id="test-case.def">
<term><firstterm>The test case</firstterm></term>
<listitem>
<simpara>
This is an independently monitored function within a test module that
consists of one or more test assertions. The term &quot;independently monitored&quot; in the definition above is
used to emphasize the fact, that all test cases are monitored independently. An uncaught exception or other abnormal
test case execution termination doesn't cause the testing to cease. Instead the error is caught by the test
case execution monitor, reported by the &utf; and testing proceeds to the next test case. Later on you are going
to see that this is one of the primary reasons to prefer multiple small test cases to a single big test function.
</simpara>
</listitem>
</varlistentry>
<varlistentry id="test-suite.def">
<term><firstterm>The test suite</firstterm></term>
<listitem>
<simpara>
This is a container for one or more test cases. The test suite gives you an ability to group
test cases into a single referable entity. There are various reasons why you may opt to do so, including:
</simpara>
<itemizedlist>
<listitem>
<simpara>To group test cases per subsystems of the unit being tested.</simpara>
</listitem>
<listitem>
<simpara>To share test case setup/cleanup code.</simpara>
</listitem>
<listitem>
<simpara>To run selected group of test cases only.</simpara>
</listitem>
<listitem>
<simpara>To see test report split by groups of test cases</simpara>
</listitem>
<listitem>
<simpara>To skip groups of test cases based on the result of another test unit in a test tree.</simpara>
</listitem>
</itemizedlist>
<simpara>
A test suite can also contain other test suites, thus allowing a hierarchical test tree structure to be formed.
The &utf; requires the test tree to contain at least one test suite with at least one test case. The top level
test suite - root node of the test tree - is called the master test suite.
</simpara>
</listitem>
</varlistentry>
<varlistentry id="test-setup.def">
<term><firstterm>The test setup</firstterm></term>
<listitem>
<simpara>
This is the part of a test module that is responsible for the test
preparation. It includes the following operations that take place prior to a start of the test:
</simpara>
<itemizedlist>
<listitem>
<simpara>
The &utf; initialization
</simpara>
</listitem>
<listitem>
<simpara>
Test tree construction
</simpara>
</listitem>
<listitem>
<simpara>
Global test module setup code
</simpara>
</listitem>
</itemizedlist>
<simpara>
&quot;Per test case&quot; setup code, invoked for every test case it's assigned to, is also attributed to the
test initialization, even though it's executed as a part of the test case.
</simpara>
</listitem>
</varlistentry>
<varlistentry id="test-cleanup.def">
<term><firstterm>The test cleanup</firstterm></term>
<listitem>
<simpara>
This is the part of test module that is responsible for cleanup operations.
</simpara>
</listitem>
</varlistentry>
<varlistentry id="test-fixture.def">
<term><firstterm>The test fixture</firstterm></term>
<listitem>
<simpara>
Matching setup and cleanup operations are frequently united into a single entity called test fixture.
</simpara>
</listitem>
</varlistentry>
<varlistentry id="test-runner.def">
<term><firstterm>The test runner</firstterm></term>
<listitem>
<simpara>
This is an &quot;executive manager&quot; that runs the show. The test runner's functionality should include
the following interfaces and operations:
</simpara>
<itemizedlist>
<listitem>
<simpara>
Entry point to a test module. This is usually either the function main() itself or single function that can be
invoked from it to start testing.
</simpara>
</listitem>
<listitem>
<simpara>
Initialize the &utf; based on runtime parameters
</simpara>
</listitem>
<listitem>
<simpara>
Select an output media for the test log and the test results report
</simpara>
</listitem>
<listitem>
<simpara>
Select test cases to execute based on runtime parameters
</simpara>
</listitem>
<listitem>
<simpara>
Execute all or selected test cases
</simpara>
</listitem>
<listitem>
<simpara>
Produce the test results report
</simpara>
</listitem>
<listitem>
<simpara>
Generate a test module result code.
</simpara>
</listitem>
</itemizedlist>
<para role="first-line-indented">
An advanced test runner may provide additional features, including interactive <acronym>GUI</acronym> interfaces,
test coverage and profiling support.
</para>
</listitem>
</varlistentry>
<varlistentry id="test-log.def">
<term><firstterm>The test log</firstterm></term>
<listitem>
<simpara>
This is the record of all events that occur during the testing.
</simpara>
</listitem>
</varlistentry>
<varlistentry id="test-results-report.def">
<term><firstterm>The test results report</firstterm></term>
<listitem>
<simpara>
This is the report produced by the &utf; after the testing is completed, that indicates which test cases/test
suites passed and which failed.
</simpara>
</listitem>
</varlistentry>
</variablelist >
</section>
<section id="utf.user-guide.usage-variants">
<title>The &utf; usage variants &hellip; or the <ulink url="http://en.wikipedia.org/wiki/Buridan's_ass">Buridan's donkey</ulink> parable</title>
<titleabbrev>Usage variants</titleabbrev>
<para role="first-line-indented">
The &utf; presents you with 4 different variants how it can be used.
</para>
<itemizedlist>
<listitem>
<simpara><link linkend="utf.user-guide.static-lib-variant">The static library variant</link></simpara>
</listitem>
<listitem>
<simpara><link linkend="utf.user-guide.dynamic-lib-variant">The dynamic library variant</link></simpara>
</listitem>
<listitem>
<simpara><link linkend="utf.user-guide.single-header-variant">The single-header variant</link></simpara>
</listitem>
<listitem>
<simpara><link linkend="utf.user-guide.extern-test-runner-variant">The external test runner variant</link></simpara>
</listitem>
</itemizedlist>
<para role="first-line-indented">
Unlike the Buridan's donkey though, you shouldn't have problems deciding which one to use, since there are
clear reasons why you would prefer each one.
</para>
<para role="first-line-indented">
In most cases to compile a test module based on the &utf; all you need to include is just the single header
<filename class="headerfile">boost/test/unit_test.hpp</filename>. This header includes internally most of the other
headers that contain the &utf; definitions. Some advanced features, like the floating point comparison or the
logged expectations testing, are defined in independent headers and need to be included explicitly.
</para>
<section id="utf.user-guide.static-lib-variant">
<title>The static library variant of the &utf;</title><titleabbrev>Static library</titleabbrev>
<para role="first-line-indented">
The &utf; can be built into a static library. If you opt to link a test module with the
<link linkend="utf.compilation.standalone">standalone static library</link>, this usage is called the static library
variant of the &utf;.
</para>
<para role="first-line-indented">
The test runner supplied with this variant requires you to implement the <link linkend="test-module.def">test
module</link> initialization function that matches one of the two specifications depending on the compilation flag
<xref linkend="utf.flag.alt-init-api" endterm="utf.flag.alt-init-api"/>. If flag isn't defined you are required
to match the original specification. If you define the flag <xref linkend="utf.flag.alt-init-api"
endterm="utf.flag.alt-init-api"/> during a test module compilation you are required to use the alternative
initialization function specification. The &utf; provides an ability to
<link linkend="utf.user-guide.initialization.auto-generation">automatically generate</link> an empty test module
initialization function with correct specification if no custom initialization is required by a test module.
</para>
<important>
<simpara>
If you opted to use an alternative initialization API, for a test module to be able to link with prebuilt library,
the flag <xref linkend="utf.flag.alt-init-api" endterm="utf.flag.alt-init-api"/> has to be defined both during
library and a test module compilation.
</simpara>
</important>
</section>
<section id="utf.user-guide.dynamic-lib-variant">
<title>The dynamic library variant of the &utf;</title>
<titleabbrev>Dynamic library</titleabbrev>
<para role="first-line-indented">
In the project with large number of test modules <link linkend="utf.user-guide.dynamic-lib-variant">the static
library</link> variant of the &utf; may cause you to waste a lot of disk space, since the &utf; is linked
statically with every test module. The solution is to link with the &utf; built into a dynamic library. If you opt
to link a test module with the prebuilt dynamic library, this usage is called the dynamic library variant of the
&utf;. This variant requires you to define the flag <xref linkend="utf.flag.dyn-link" endterm="utf.flag.dyn-link"/>
either in a makefile or before the header <filename class="headerfile">boost/test/unit_test.hpp</filename>
inclusion.
</para>
<para role="first-line-indented">
The test runner supplied with this variant requires you to implement the <link linkend="test-module.def">test
module</link> initialization function that matches the alternative initialization function signature. The &utf;
provides an ability to <link linkend="utf.user-guide.initialization.auto-generation">automatically generate</link>
an empty test module initialization function with correct signature if no custom initialization is required by a
test module.
</para>
<note>
<simpara>
The name of the test module initialization function is not enforced, since the function is passed as an argument
to the test runner.
</simpara>
</note>
</section>
<section id="utf.user-guide.single-header-variant">
<title>The single-header variant of the &utf;</title>
<titleabbrev>Single header</titleabbrev>
<para role="first-line-indented">
If you prefer to avoid the <link linkend="utf.compilation.standalone">standalone library compilation</link>, you
should use the single-header variant of the &utf;. This variant is implemented, as it follows from its name, in
the single header <filename class="headerfile">boost/test/included/unit_test.hpp</filename>. An inclusion of
the header causes the complete implementation of the &utf; to be included as a part of a test module's
source file. The header <filename class="headerfile">boost/test/unit_test.hpp</filename> doesn't have to be
included anymore. You don't have to worry about disabling <link linkend="utf.compilation.auto-linking">
auto-linking</link> feature either. It's done in the implementation header already. This variant
can't be used with the <xref linkend="multi-file-test-module.def" endterm="multi-file-test-module.def"/>.
Otherwise it's almost identical from the usage perspective to the static library variant of the &utf;.
In fact the only difference is the name of the include file:
<filename class="headerfile">boost/test/included/unit_test.hpp</filename> instead of
<filename class="headerfile">boost/test/unit_test.hpp</filename>.
</para>
<para role="first-line-indented">
The test runner supplied with this variant requires you to implement the <link linkend="test-module.def">test
module</link> initialization function that matches one of the two specifications depending on the compilation flag
<xref linkend="utf.flag.alt-init-api" endterm="utf.flag.alt-init-api"/>. If flag isn't defined you are required to
match the original specification. If you define the flag
<xref linkend="utf.flag.alt-init-api" endterm="utf.flag.alt-init-api"/> during a test module compilation you are
required to use the alternative initialization function specification. The &utf; provides an ability to
<link linkend="utf.user-guide.initialization.auto-generation">automatically generate</link> an empty test module
initialization function with correct specification if no custom initialization is required by a test module.
</para>
</section>
<section id="utf.user-guide.extern-test-runner-variant">
<title>The external test runner variant of the &utf;</title>
<titleabbrev>External test runner</titleabbrev>
<para role="first-line-indented">
All other usage variants employ the built-in test runners. If you plan to use an external test runner with your
test module you need to build it as a dynamic library. This usage of the &utf; is called the external test runner
variant of the &utf;. The variant requires you to define the flag
<xref linkend="utf.flag.dyn-link" endterm="utf.flag.dyn-link"/> either in a makefile or before the header
<filename class="headerfile">boost/test/unit_test.hpp</filename> inclusion. An external test runner utility is
required to link with dynamic library.
</para>
<para role="first-line-indented">
If an external test runner is based on the test runner built in to the dynamic library (like the standalone
boost_test_runner utility supplied by the &utf;), it requires you to implement the <link linkend="test-module.def">
test module</link> initialization function that matches the alternative initialization function signature. The
&utf; provides an ability to <link linkend="utf.user-guide.initialization.auto-generation">automatically generate
</link> an empty test module initialization function with correct signature if no custom initialization is required
by a test module.
</para>
<note>
<simpara>
An advanced test runner doesn't have to be based on the built-in one and may require a different
test module initialization function signature and/or name.
</simpara>
</note>
</section>
</section>
<section id="utf.user-guide.test-runners">
<title>The supplied test runners &hellip; or where is the entrance?</title>
<titleabbrev>Supplied test runners</titleabbrev>
<para role="first-line-indented">
All usage variants of the &utf;, excluding the
<link linkend="utf.user-guide.external-test-runner">external test runner</link>, supply the test runner in a form of
free function named unit_test_main with the following signature:
</para>
<programlisting>int unit_test_main( init_unit_test_func init_func, int argc, char* argv[] );</programlisting>
<para role="first-line-indented">
To invoke the test runner you are required to supply the pointer to the <link linkend="test-module.def">test module</link>
initialization function as the first argument to the test runner function. In majority of the cases this function is
invoked directly from test executable entry point - function main(). In most usage variants the &utf; can
automatically generate default function main() implementation as either part of the library or test module itself.
Since the function main needs to refer to the initialization function by name, it is predefined by the default
implementation and you are required to match both specific signature and name, when implementing initialization
function. If you for any reason prefer more flexibility you can opt to implement the function main() yourself, in
which case it's going to be your responsibility to invoke the test runner, but the initialization function name is
not enforced by the &utf;. See below for the flags that need to be defined/undefined in each usage variant to enable this.
</para>
<warning>
<simpara>
In spite of the syntactic similarity, the signatures of the test runner function are in fact different for different usage
variants. The cause is different signature of the test module initialization function referred by the
<link linkend="utf.user-guide.initialization.signature-typedef">typedef init_unit_test_func</link>. This makes static
and dynamic library usage variants incompatible and they can't be easily switched on the fly.
</simpara>
</warning>
<section id="utf.user-guide.static-lib-runner">
<title>Static library variant of the &utf;</title>
<titleabbrev>Static library</titleabbrev>
<para role="first-line-indented">
By default this variant supplies the function main() as part of static library. If this is for any reason undesirable
you need to define the flag <xref linkend="utf.flag.no-main" endterm="utf.flag.no-main"/> during the library
compilation and the function main() implementation won't be generated.
</para>
<para role="first-line-indented">
In addition to the <link linkend="utf.user-guide.static-lib-variant">initialization function signature requirement</link>
default function main() implementation assumes the name of initialization function is init_unit_test_suite
</para>
</section>
<section id="utf.user-guide.dynamic-lib-runner">
<title>Dynamic library variant of the &utf;</title>
<titleabbrev>Dynamic library</titleabbrev>
<para role="first-line-indented">
Unlike the static library variant function main() can't reside in the dynamic library body. Instead this variant
supplies default function main() implementation as part of the header
<filename class="headerfile">boost/test/unit_test.hpp</filename> to be generated as part of your test file body.
The function main() is generated only if either the <xref linkend="utf.flag.main" endterm="utf.flag.main"/> or
the <xref linkend="utf.flag.module" endterm="utf.flag.module"/> flags are defined during a test module compilation.
For <link linkend="single-file-test-module.def">single-file test module</link> flags can be defined either in a
test module's makefile or before the header <filename class="headerfile">boost/test/unit_test.hpp</filename>
inclusion. For a <xref linkend="multi-file-test-module.def" endterm="multi-file-test-module.def"/> flags can't
be defined in makefile and have to be defined in only one of the test files to avoid duplicate copies of the
function main().
</para>
<important>
<simpara>
The same flags also govern generation of an empty
<link linkend="utf.user-guide.initialization">test module initialization function</link>. This means that if you
need to implement either function main() or initialization function manually, you can't define the above flags
and are required to manually implement both of them.
</simpara>
</important>
</section>
<section id="utf.user-guide.single-header-runner">
<title>Single-header variant of the &utf;</title>
<titleabbrev>Single header</titleabbrev>
<para role="first-line-indented">
By default this variant supplies function main() as part of the header
<filename class="headerfile">boost/test/included/unit_test.hpp</filename> to be generated as part of your test file
body. If this is for any reason undesirable you need to define the flag
<xref linkend="utf.flag.no-main" endterm="utf.flag.no-main"/> during test module compilation and the function main()
implementation won't be generated.
</para>
</section>
<section id="utf.user-guide.external-test-runner">
<title>External test runner variant of the &utf;</title>
<titleabbrev>External test runner</titleabbrev>
<para role="first-line-indented">
The external test runner variant of the &utf; supplies the test runner in a form of standalone utility
boost_test_runner. You are free to implement different, more advanced, test runners that can be used with this
variant.
</para>
<simpara>
<!-- TO FIX -->
</simpara>
</section>
<section id="utf.user-guide.runners-exit-status">
<title>Generated exit status values</title>
<para role="first-line-indented">
Once testing is finished, all supplied test runners report the results and return an exit status value. Here is
a summary of all possible generated values:
</para>
<table id="utf.user-guide.runners-exit-status-summary">
<title>Generated exit status values</title>
<tgroup cols="2">
<colspec colname="c1"/>
<colspec colname="c2"/>
<thead>
<row>
<entry>Value</entry>
<entry>Meaning</entry>
</row>
</thead>
<tbody>
<row>
<entry>boost::exit_success</entry>
<entry>
No errors occurred during the test or the success result code was explicitly requested with the no_result_code
parameter.
</entry>
</row>
<row>
<entry>boost::exit_test_failure</entry>
<entry>
Non-fatal errors detected and no uncaught exceptions were thrown during testing or the &utf; fails during
initialization.
</entry>
</row>
<row>
<entry>boost::exit_exception_failure</entry>
<entry>
Fatal errors were detected or uncaught exceptions thrown during testing.
</entry>
</row>
</tbody>
</tgroup>
</table>
</section>
</section>
<section id="utf.user-guide.initialization">
<title>Test module initialization &hellip; or ready, set &hellip;</title>
<titleabbrev>Test module initialization</titleabbrev>
<para role="first-line-indented">
There are two tasks that you may need to perform before actual testing can start:
</para>
<itemizedlist>
<listitem>
<simpara>
The test tree needs to be built (unless you are using automated test units registration).
</simpara>
</listitem>
<listitem>
<simpara>
Custom test module initialization needs to be performed. This includes
initialization of the code under test and custom tune-up of the &utf; parameters (for example the test log or the
test results report output streams redirection).
</simpara>
</listitem>
</itemizedlist>
<para role="first-line-indented">
The function dedicated for this purpose is called the test module initialization function. Alternatively you can
employ global fixtures, covered in details, including differences in two approaches, in
<xref linkend="utf.user-guide.fixture"/>.
</para>
<para role="first-line-indented">
The &utf; requires you to implement the test module initialization function. The test runner supplied with the static
library or single-header variants of the &utf; requires the specific function specification. The test runner supplied
with the dynamic library variant of the &utf; requires the specific initialization function signature only. <!-- TO FIX: specific specification -->
</para>
<para role="first-line-indented">
For many <link linkend="test-module.def">test modules</link> you don't need to do any custom initialization
and test tree construction is automated. In this case you don't really need the initialization function and
the &utf; provides a way to automatically generate an empty one for you.
</para>
<para role="first-line-indented">
Original design of the &utf; supported the manual test tree construction only. Later versions introduced the
automated registration of test units. In later versions of the &utf; the original initialization function
specification became inconvenient and unnecessarily unsafe. So the alternative initialization function specification
was introduced. This change is not backward compatible. The test runners supplied with the static library and
single-header variants of the &utf; by default still require original initialization function specification, but
support <link linkend="utf.compilation.flags">compilation flags</link> that switch to the alternative one. The test
runner supplied with dynamic library variant of the &utf; requires new specification and doesn't support
original one. The plan is to deprecate the original initialization function specification in one of the future
releases and ultimately to stop supporting it.
</para>
<para role="first-line-indented">
The initialization function invocation is monitored by the &utf; the same way as all the test cases. An unexpected
exception or system error detected during initialization function invocation is treated as initialization error and
is reported as such.
</para>
<section id="utf.user-guide.initialization.orig-signature">
<title>Original initialization function signature and name</title>
<titleabbrev>Original initialization function</titleabbrev>
<para role="first-line-indented">
The original design of the &utf; initialization requires you to implement the function with the following
specification:
</para>
<programlisting><classname>boost::unit_test::test_suite</classname>* init_unit_test_suite( int argc, char* argv[] );</programlisting>
<para role="first-line-indented">
In original design of the &utf; this function was intended to initialize and return a master test suite. The null
value was considered an initialization error. The current design of the &utf; maintains master test suite instance
internally and does not treat the null result value as an initialization error. In fact it's recommended to
return null value always and register test units in the master test suite using the regular test suite add
interface. The only way to indicate an initialization error is to throw the
<classname>boost::unit_test::framework::setup_error</classname> exception.
</para>
<para role="first-line-indented">
The initialization function parameters argc, argv provide the command line arguments specified during test
module invocation. It's guaranteed that any framework-specific command line arguments are excluded. To be
consistent with the alternative initialization function specification it's recommended though to access the
command line arguments using the master test suite interface.
</para>
</section>
<section id="utf.user-guide.initialization.alt-signature">
<title>Alternative initialization function signature and name</title>
<titleabbrev>Alternative initialization function</titleabbrev>
<para role="first-line-indented">
The alternative design of the &utf; initialization requires you to implement a function with the following
specification:
</para>
<programlisting>bool init_unit_test();</programlisting>
<para role="first-line-indented">
The result value of this function indicates whether or not initialization was successful. To register test
units in a master test suite use the test suite add interface. To access command line arguments use the master
test suite interface. It's guaranteed that any framework-specific command line arguments are excluded.
</para>
</section>
<section id="utf.user-guide.initialization.signature-typedef">
<title>Initialization function signature access</title>
<para role="first-line-indented">
The test runner interface needs to refer to the initialization function signature. The &utf; provides the typedef
that resolves to proper signature in all configurations:
</para>
<programlisting>namespace boost {
namespace unit_test {
#ifdef BOOST_TEST_ALTERNATIVE_INIT_API
typedef bool (*init_unit_test_func)();
#else
typedef test_suite* (*init_unit_test_func)( int, char* [] );
#endif
}
}</programlisting>
</section>
<section id="utf.user-guide.initialization.auto-generation">
<title>Automated generation of the test module initialization function</title>
<titleabbrev>Automated generation</titleabbrev>
<para role="first-line-indented">
To automatically generate an empty test module initialization function you need to define
<xref linkend="utf.flag.main" endterm="utf.flag.main"/> before including the
<filename class="headerfile">boost/test/unit_test.hpp</filename> header. The value of this define is ignored.
Alternatively you can define the macro <xref linkend="utf.flag.module" endterm="utf.flag.module"/> to be equal to
any string (not necessarily in quotes). This macro causes the same result as
<xref linkend="utf.flag.main" endterm="utf.flag.main"/>, and in addition the macro value becomes the name of the
master test suite.
</para>
<important>
<simpara>
For a test module consisting of multiple source files you have to define these flags in a single test file only.
Otherwise you end up with multiple instances of the initialization function.
</simpara>
</important>
</section>
</section>
<xi:include href="utf.users-guide.test-organization.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="utf.users-guide.fixture.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="utf.users-guide.test-output.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="utf.user-guide.runtime-config.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
</section>

View File

@ -92,7 +92,7 @@
<itemizedlist mark ="square">
<listitem>
<simpara>
Simplify writing test cases by using various <link linkend="utf.testing-tools.reference">testing tools</link>.
Simplify writing test cases by using various <link linkend="utf.user-guide.testing-tools.reference">testing tools</link>.
</simpara>
</listitem>
<listitem>
@ -143,8 +143,8 @@
</itemizedlist>
<para role="first-line-indented">
For those interested in getting started quickly please visit <ulink url="example-toc.html">collection of
examples</ulink> presented in this documentation.
For those interested in getting started quickly please visit <link linkend="utf.examples">collection of
examples</link> presented in this documentation.
</para>
</section>
@ -161,7 +161,9 @@
The &utf; is comparatively complicated component and is implemented in close to hundred header and source files,
so for long term usage the preferable solution is to build the &utf; as a reusable standalone library.
Depending on your platform this may save you a significant time during test module compilation and doesn't
really require that much effort.
really require that much effort <footnote><simpara>If you are using Visual Studio compilers do not forget to
set the subsystem to console when you build test modules. You can do it either in the project properties or by setting
the command line option /SUBSYSTEM:CONSOLE. A number of people have reported link errors caused specifically by this omission.</simpara></footnote>.
<ulink url="http://boost.org/more/getting_started/index.html">Boost Getting started</ulink> tells you how to get
pre-built libraries for some platforms. If available, this is the easiest option and you can ignore standalone
library compilation instructions below.
@ -229,7 +231,7 @@
</itemizedlist>
</section>
<section id="utf.compilation.procedured">
<section id="utf.compilation.procedure">
<title>Compilation procedures</title>
<para role="first-line-indented">
@ -237,9 +239,9 @@
linking with the &utf; may require additional steps. The &utf; presents you with options to either
<link linkend="utf.compilation.standalone">built and link with a standalone library</link> or
<link linkend="utf.compilation.direct-include">include the implementation directly</link> into a test module.
If you opt to use the library the &utf; headers implement the
<link linkend="utf.compilation.auto-linking">auto-linking support</link>. The compilation of the &utf; library and
a test module can be configured using the following compilation flags.
If you opt to use the library the &utf; headers implement the <link linkend="utf.compilation.auto-linking">
auto-linking support</link>. The compilation of the &utf; library and a test module can be configured using the
following compilation flags.
</para>
<table id="utf.compilation.flags">
@ -314,7 +316,7 @@
(for example Microsoft Visual Studio). The Boost preferred solution is Boost.Build system that is based on top of
bjam tool. Make systems require some kind of configuration file that lists all files that constitute the library
and all build options. For example the makefile that is used by make, or the Microsoft Visual Studio project file,
Jamfile is used by Boost.Build. For the sake of simplicity let's call this file the makefile.</simpara></footnote>.
Jamfile is used by Boost.Build. For the sake of simplicity let's call this file the makefile.</simpara></footnote>.
</para>
<para role="first-line-indented">
@ -409,7 +411,7 @@
</section>
</section>
<xi:include href="utf.users-guide.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="utf.testing-tools.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="utf.user-guide.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="utf.usage-recommendations.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="utf.examples.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
</section>

View File

@ -370,7 +370,7 @@
<xsl:variable name = "home" select = "/*[1]"/>
<xsl:variable name = "up" select = "parent::*"/>
<xsl:variable name = "boost.test.image.src" select = "concat($boost.root, '/libs/test/docbook/img/boost.test.logo.png')"/>
<xsl:variable name = "boost.test.image.src" select = "concat($boost.root, '/libs/test/doc/img/boost.test.logo.png')"/>
<table width = "100%">
<tr>