+// Checks a single narrow<->wide conversion pair using the given converter.
+//
+// In a Unicode build, converts the wide string ws to multibyte via conv and
+// asserts it equals s (nothing is tested if ws is NULL).  In an ANSI build,
+// converts the narrow string s to wide via conv and asserts it equals ws;
+// a NULL ws there means the conversion is expected to fail, i.e. to yield
+// an empty buffer.
+void
+StringTestCase::DoTestConversion(const char *s,
+ const wchar_t *ws,
+ wxCSConv& conv)
+{
+#if wxUSE_UNICODE
+ if ( ws )
+ {
+ // wide -> multibyte using the converter under test
+ wxCharBuffer buf(wxString(ws).mb_str(conv));
+
+ CPPUNIT_ASSERT( strcmp(buf, s) == 0 );
+ }
+#else // !wxUSE_UNICODE
+ if ( s )
+ {
+ // multibyte -> wide using the converter under test
+ wxWCharBuffer wbuf(wxString(s).wc_str(conv));
+
+ if ( ws )
+ CPPUNIT_ASSERT( wcscmp(wbuf, ws) == 0 );
+ else
+ // NULL expected result: the conversion should have failed,
+ // leaving an empty buffer
+ CPPUNIT_ASSERT( !*wbuf );
+ }
+#endif // wxUSE_UNICODE/!wxUSE_UNICODE
+}
+
+// One test case for DoTestConversion(): a string in its multibyte and wide
+// forms.  A NULL field marks a conversion that is expected to fail (see
+// DoTestConversion() for how each build variant interprets it).
+struct StringConversionData
+{
+ const char *str; // multibyte (narrow) form
+ const wchar_t *wcs; // wide form, NULL if converting str should fail
+};
+
+// Exercises the UTF-7 converter on a table of encoded/decoded string pairs.
+void StringTestCase::ConversionUTF7()
+{
+    static const StringConversionData utf7data[] =
+    {
+        { "+-", L"+" },
+        { "+--", L"+-" },
+        { "+AKM-", L"\u00a3" },
+
+        // Our UTF-7 decoder, like the Windows one, silently accepts some
+        // invalid input instead of failing -- arguably wrong, but that's
+        // the current behaviour.
+        //
+        // Decoding "+" still checks out because the result is an empty
+        // string, indistinguishable from a failed conversion; "a+" however
+        // decodes to "a" even though it really ought to be rejected.
+        { "+", NULL },
+        { "a+", L"a" },
+    };
+
+    wxCSConv conv(_T("utf-7"));
+
+    // walk the table with a pointer rather than an index
+    const StringConversionData * const end = utf7data + WXSIZEOF(utf7data);
+    for ( const StringConversionData *item = utf7data; item != end; ++item )
+        DoTestConversion(item->str, item->wcs, conv);
+}
+
+// Exercises the UTF-8 converter on valid and truncated byte sequences.
+void StringTestCase::ConversionUTF8()
+{
+    static const StringConversionData utf8data[] =
+    {
+        { "\xc2\xa3", L"\u00a3" },
+        { "\xc2", NULL },       // lone lead byte: conversion must fail
+    };
+
+    wxCSConv conv(_T("utf-8"));
+
+    const size_t count = WXSIZEOF(utf8data);
+    for ( size_t i = 0; i < count; ++i )
+    {
+        const StringConversionData& item = utf8data[i];
+        DoTestConversion(item.str, item.wcs, conv);
+    }
+}
+
+#endif // wxUSE_WCHAR_T
+
+