<?xml version="1.0" encoding="UTF-8"?>
<fpdoc-descriptions>
<package name="lcl">
<!--
====================================================================
LConv
====================================================================
-->
<module name="LConvEncoding">
<short>Contains routines and tables to convert text between various encodings.</short>
<descr/>
<!-- unresolved type reference Visibility: default -->
<element name="SysUtils">
<short/>
<descr/>
<seealso/>
</element>
<!-- unresolved type reference Visibility: default -->
<element name="classes">
<short/>
<descr/>
<seealso/>
</element>
<!-- function Visibility: default -->
<element name="CPConvert">
<short/>
<descr/>
<errors/>
<seealso/>
</element>
<!-- function result Visibility: default -->
<element name="CPConvert.Result">
<short/>
</element>
<!-- argument Visibility: default -->
<element name="CPConvert.s">
<short/>
</element>
<!-- argument Visibility: default -->
<element name="CPConvert.from">
<short/>
</element>
<!-- argument Visibility: default -->
<element name="CPConvert.toC">
<short/>
</element>
<!-- function Visibility: default -->
<element name="GetDefaultCodepage">
<short/>
<descr/>
<errors/>
<seealso/>
</element>
<!-- function result Visibility: default -->
<element name="GetDefaultCodepage.Result">
<short/>
</element>
<element name="GuessEncoding">
<short>Guesses the character encoding of a string.</short>
<descr>The returned encoding name can be used with the other functions of this unit. GuessEncoding uses heuristics to find a suitable encoding: it first checks for magic numbers such as a UTF-8 byte order mark (BOM), then tests whether the text is valid UTF-8, and finally falls back to the system encoding.
This function can be used to determine the encoding of text files.
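For example, a minimal sketch (the file name 'readme.txt' is purely illustrative) that detects the encoding of a text file and converts the text to UTF-8:
<code>
program GuessEncodingDemo;
{$mode objfpc}{$H+}
uses
  Classes, SysUtils, LConvEncoding;
var
  fs: TFileStream;
  Raw, Enc: string;
begin
  // Read the raw bytes of the file; no conversion happens here.
  fs := TFileStream.Create('readme.txt', fmOpenRead or fmShareDenyWrite);
  try
    SetLength(Raw, fs.Size);
    if Length(Raw) > 0 then
      fs.ReadBuffer(Raw[1], Length(Raw));
  finally
    fs.Free;
  end;
  Enc := GuessEncoding(Raw);                 // e.g. 'utf8', 'utf8bom', 'cp1252', ...
  Raw := ConvertEncoding(Raw, Enc, 'utf8');  // normalize to UTF-8 for use in the LCL
  WriteLn('Detected encoding: ', Enc);
end.
</code>
</descr>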
</element>
<element name="ConvertEncoding">
<short>Converts the character encoding of a string.</short>
<descr>ConvertEncoding uses lookup tables to convert between almost any pair of the supported encodings. The parameters of this function are:
s - The string to be converted
FromEncoding - The encoding of the string s
ToEncoding - The encoding of the result
The encodings are specified as string values; the following values are currently supported:
'utf8' - UTF-8, the standard Unicode encoding of the LCL
'ansi' - Ansi represents whatever the current system locale uses, always a single-byte or multi-byte encoding, never a word encoding like UTF-16
'utf8bom' - Indicates that the text is in UTF-8 and starts with a UTF-8 byte order mark
'ucs2le' - UCS-2 encoded in little endian byte order
'ucs2be' - UCS-2 encoded in big endian byte order
'iso88591' - ISO 8859-1 (Latin 1, Western European)
'iso88592' - ISO 8859-2 (Latin 2, Central European)
'cp1250' - Windows code page 1250 (Central European)
'cp1251' - Windows code page 1251 (Cyrillic)
'cp1252' - Windows code page 1252 (Western European)
'cp1253' - Windows code page 1253 (Greek)
'cp1254' - Windows code page 1254 (Turkish)
'cp1255' - Windows code page 1255 (Hebrew)
'cp1256' - Windows code page 1256 (Arabic)
'cp1257' - Windows code page 1257 (Baltic)
'cp1258' - Windows code page 1258 (Vietnamese)
'cp437' - DOS code page 437 (original IBM PC)
'cp850' - DOS code page 850 (Western European)
'cp852' - DOS code page 852 (Central European)
'cp866' - DOS code page 866 (Cyrillic, Russian)
'cp874' - Code page 874 (Thai)
'cp936' - Code page 936 (Simplified Chinese, GBK)
'cp950' - Code page 950 (Traditional Chinese, Big5)
'cp949' - Code page 949 (Korean)
'cp932' - Code page 932 (Japanese, Shift-JIS)
'koi8' - KOI8 (Cyrillic)
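A minimal usage sketch (the sample string is only illustrative), converting an ISO 8859-1 encoded string to UTF-8:
<code>
program ConvertEncodingDemo;
{$mode objfpc}{$H+}
uses
  LConvEncoding;
var
  Latin1Text, Utf8Text: string;
begin
  Latin1Text := 'B' + #$E4 + 'r';                              // 'Bär' encoded in ISO 8859-1 (3 bytes)
  Utf8Text := ConvertEncoding(Latin1Text, 'iso88591', 'utf8'); // the umlaut becomes two bytes
  WriteLn(Length(Latin1Text), ' -> ', Length(Utf8Text));       // prints: 3 -> 4
end.
</code>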
</descr>
</element>
<element name="GetDefaultTextEncoding">
<short>Returns the common text encoding of the current operating system.</short>
<descr>The result depends on the platform:
Under Windows it returns the current Windows (ANSI) code page.
Under Darwin (Mac OS X) it returns UTF-8.
Under other Unix-like systems (e.g. Linux, BSD, Solaris) it derives the encoding from the locale environment variables (LC_ALL, LC_MESSAGES, LANG); nowadays this is typically UTF-8.
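For example, a minimal sketch (the sample string is only illustrative) that converts an LCL UTF-8 string to the system encoding:
<code>
program DefaultTextEncodingDemo;
{$mode objfpc}{$H+}
uses
  LConvEncoding;
var
  SysEnc, SysText: string;
begin
  SysEnc := GetDefaultTextEncoding;  // e.g. 'cp1252' on a western European Windows, 'utf8' on a modern Linux
  // 'Grüße' written as explicit UTF-8 bytes so the example does not depend on the source file encoding
  SysText := ConvertEncoding('Gr' + #$C3#$BC + #$C3#$9F + 'e', 'utf8', SysEnc);
  WriteLn(SysText);
end.
</code>
</descr>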
</element>
<element name="NormalizeEncoding">
<short>Trims, shortens and lowercases the given encoding name.</short>
</element>
<element name="GetSupportedEncodings">
<short>Fills the given list with the names of all basic encodings.</short>
<descr>It does not add all encodings that are supported by the operating system; for example, iconv under Linux supports hundreds of encodings.
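A minimal sketch (assuming GetSupportedEncodings fills a given TStrings) that lists the built-in encoding names:
<code>
program ListEncodingsDemo;
{$mode objfpc}{$H+}
uses
  Classes, LConvEncoding;
var
  List: TStringList;
  i: Integer;
begin
  List := TStringList.Create;
  try
    GetSupportedEncodings(List);       // fills the list with the built-in encoding names
    for i := 0 to List.Count - 1 do
      WriteLn(List[i]);
  finally
    List.Free;
  end;
end.
</code>
</descr>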
</element>
</module>
<!-- LConv -->
</package>
</fpdoc-descriptions>