--- test/html-webhacc/cc.cgi 2007/06/27 11:08:03 1.1
+++ test/html-webhacc/cc.cgi 2007/07/17 14:28:20 1.14
@@ -5,197 +5,486 @@
/home/wakaba/work/manakai/lib
/home/wakaba/public_html/-temp/wiki/lib];
use CGI::Carp qw[fatalsToBrowser];
-use Time::HiRes qw/time/;
+use Scalar::Util qw[refaddr];
use SuikaWiki::Input::HTTP; ## TODO: Use some better CGI module
+sub htescape ($) {
+ my $s = $_[0];
+ $s =~ s/&/&amp;/g;
+ $s =~ s/</&lt;/g;
+ $s =~ s/>/&gt;/g;
+ $s =~ s/"/&quot;/g;
+ $s =~ s{([\x00-\x09\x0B-\x1F\x7F-\xA0\x{FEFF}\x{FFFC}-\x{FFFF}])}{
+ sprintf 'U+%04X', ord $1;
+ }ge;
+ return $s;
+} # htescape
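+## Illustrative example (not part of the script's flow): assuming the
+## substitutions above,
+##   htescape q[<a href="?">&amp;]
+## yields '&lt;a href=&quot;?&quot;&gt;&amp;amp;', and a control character
+## such as "\x00" is rendered visibly as 'U+0000'.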
+
my $http = SuikaWiki::Input::HTTP->new;
## TODO: _charset_
-my @mode = split m#/#, scalar $http->meta_variable ('PATH_INFO'), -1;
-shift @mode if @mode and $mode[0] == '';
-## TODO: decode unreserved characters
-
- my $s = $http->parameter ('s');
- if (length $s > 1000_000) {
- print STDOUT "Status: 400 Document Too Long\nContent-Type: text/plain; charset=us-ascii\n\nToo long";
+ if ($http->meta_variable ('PATH_INFO') ne '/') {
+ print STDOUT "Status: 404 Not Found\nContent-Type: text/plain; charset=us-ascii\n\n404";
exit;
}
- my $char_length = length $s;
- my %time;
- my $time1;
- my $time2;
+
+ binmode STDOUT, ':utf8';
+ $| = 1;
require Message::DOM::DOMImplementation;
- my $dom = Message::DOM::DOMImplementation->____new;
-# $| = 1;
- my $doc;
- my $el;
+ my $dom = Message::DOM::DOMImplementation->new;
-if (@mode == 3 and $mode[0] eq 'html' and
- ($mode[2] eq 'html' or $mode[2] eq 'test')) {
- print STDOUT "Content-Type: text/plain; charset=utf-8\n\n";
+ load_text_catalog ('en'); ## TODO: conneg
- require Encode;
- require Whatpm::HTML;
+ my @nav;
+ print STDOUT qq[Content-Type: text/html; charset=utf-8
- $time1 = time;
- $s = Encode::decode ('utf-8', $s);
- $time2 = time;
- $time{decode} = $time2 - $time1;
-
+
+
+
+Web Document Conformance Checker (BETA)
+
+
+
+
+];
+
+ $| = 0;
+ my $input = get_input_document ($http, $dom);
+ my $inner_html_element = $http->parameter ('e');
+
+ print qq[
+
+
+- Request URI
+ <@{[htescape $input->{request_uri}]}>
+- Document URI
+ <@{[htescape $input->{uri}]}>
+]; # no
yet
+ push @nav, ['#document-info' => 'Information'];
+
+if (defined $input->{s}) {
+
+ print STDOUT qq[
+
Base URI
+
<@{[htescape $input->{base_uri}]}>
+
Internet Media Type
+
@{[htescape $input->{media_type}]}
+ @{[$input->{media_type_overridden} ? '(overridden)' : '']}
+
Character Encoding
+
@{[defined $input->{charset} ? ''.htescape ($input->{charset}).'
' : '(none)']}
+ @{[$input->{charset_overridden} ? '(overridden)' : '']}
+
+
+];
- print STDOUT "#errors\n";
+ print_http_header_section ($input);
+
+ my $doc;
+ my $el;
+
+ if ($input->{media_type} eq 'text/html') {
+ require Encode;
+ require Whatpm::HTML;
+
+ $input->{charset} ||= 'ISO-8859-1'; ## TODO: for now.
+
+ my $t = Encode::decode ($input->{charset}, $input->{s});
+
+ print STDOUT qq[
+
+
Parse Errors
+
+
];
+ push @nav, ['#parse-errors' => 'Parse Error'];
my $onerror = sub {
my (%opt) = @_;
- print STDOUT "$opt{line},$opt{column},$opt{type}\n";
+ my ($type, $cls, $msg) = get_text ($opt{type}, $opt{level});
+ if ($opt{column} > 0) {
+ print STDOUT qq[- Line $opt{line} column $opt{column}
\n];
+ } else {
+ $opt{line} = $opt{line} - 1 || 1;
+ print STDOUT qq[- Line $opt{line}
\n];
+ }
+ $type =~ tr/ /-/;
+ $type =~ s/\|/%7C/g;
+ $msg .= qq[ [Description]];
+ print STDOUT qq[- $msg
\n];
};
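+## Note on the handler above: the error type is massaged into a fragment
+## identifier for the [Description] link, e.g. a hypothetical type
+## "attribute not allowed|foo" becomes "attribute-not-allowed%7Cfoo"
+## (spaces replaced by "-", "|" percent-encoded as "%7C").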
$doc = $dom->create_document;
- $time1 = time;
- if (length $mode[1]) {
+ if (defined $inner_html_element and length $inner_html_element) {
$el = $doc->create_element_ns
- ('http://www.w3.org/1999/xhtml', [undef, $mode[1]]);
- Whatpm::HTML->set_inner_html ($el, $s, $onerror);
+ ('http://www.w3.org/1999/xhtml', [undef, $inner_html_element]);
+ Whatpm::HTML->set_inner_html ($el, $t, $onerror);
} else {
- Whatpm::HTML->parse_string ($s => $doc, $onerror);
+ Whatpm::HTML->parse_string ($t => $doc, $onerror);
}
- $time2 = time;
- $time{parse} = $time2 - $time1;
- print "#document\n";
+ print STDOUT qq[
+
+];
+
+ print_source_string_section (\($input->{s}), $input->{charset});
+ } elsif ({
+ 'text/xml' => 1,
+ 'application/xhtml+xml' => 1,
+ 'application/xml' => 1,
+ }->{$input->{media_type}}) {
+ require Message::DOM::XMLParserTemp;
+
+ print STDOUT qq[
+
+
Parse Errors
- my $out;
- if ($mode[2] eq 'html') {
- $time1 = time;
- $out = Whatpm::HTML->get_inner_html ($el || $doc);
- $time2 = time;
- $time{serialize_html} = $time2 - $time1;
- } else { # test
- $time1 = time;
- $out = test_serialize ($el || $doc);
- $time2 = time;
- $time{serialize_test} = $time2 - $time1;
- }
- print STDOUT Encode::encode ('utf-8', $$out);
- print STDOUT "\n";
-} elsif (@mode == 3 and $mode[0] eq 'xhtml' and
- ($mode[2] eq 'html' or $mode[2] eq 'test')) {
- print STDOUT "Content-Type: text/plain; charset=utf-8\n\n";
-
- require Message::DOM::XMLParserTemp;
- print STDOUT "#errors\n";
+
];
+ push @nav, ['#parse-errors' => 'Parse Error'];
my $onerror = sub {
my $err = shift;
- print STDOUT $err->location->line_number, ",";
- print STDOUT $err->location->column_number, ",";
- print STDOUT $err->text, "\n";
+ my $line = $err->location->line_number;
+ print STDOUT qq[- Line $line column ];
+ print STDOUT $err->location->column_number, "
- ";
+ print STDOUT htescape $err->text, "
\n";
return 1;
};
- open my $fh, '<', \$s;
- my $time1 = time;
+ open my $fh, '<', \($input->{s});
$doc = Message::DOM::XMLParserTemp->parse_byte_stream
- ($fh => $dom, $onerror, charset => 'utf-8');
- my $time2 = time;
- $time{parse_xml} = $time2 - $time1;
-
- print "#document\n";
-
- my $out;
- if ($mode[2] eq 'html') {
- ## TODO: Use XHTML serializer
- #$out = Whatpm::HTML->get_inner_html ($doc);
- } else { # test
- $time1 = time;
- $out = test_serialize ($doc);
- $time2 = time;
- $time{serialize_test} = $time2 - $time1;
+ ($fh => $dom, $onerror, charset => $input->{charset});
+
+ print STDOUT qq[
+
+
+];
+ print_source_string_section (\($input->{s}), $doc->input_encoding);
+ } else {
+ ## TODO: Change HTTP status code??
+ print STDOUT qq[
+
+
Media type @{[htescape $input->{media_type}]}
is not supported!
+
+];
+ push @nav, ['#result-summary' => 'Result'];
}
- print STDOUT Encode::encode ('utf-8', $$out);
- print STDOUT "\n";
-} else {
- print STDOUT "Status: 404 Not Found\nContent-Type: text/plain; charset=us-ascii\n\n404";
- exit;
-}
- if ($http->parameter ('dom5')) {
+
+ if (defined $doc or defined $el) {
+ print STDOUT qq[
+
+
Document Tree
+];
+ push @nav, ['#document-tree' => 'Tree'];
+
+ print_document_tree ($el || $doc);
+
+ print STDOUT qq[
+
+
+
+
Document Errors
+
+
];
+ push @nav, ['#document-errors' => 'Document Error'];
+
require Whatpm::ContentChecker;
my $onerror = sub {
my %opt = @_;
- print STDOUT get_node_path ($opt{node}) . ';' . $opt{type} . "\n";
+ my ($type, $cls, $msg) = get_text ($opt{type}, $opt{level});
+ $type =~ tr/ /-/;
+ $type =~ s/\|/%7C/g;
+ $msg .= qq[ [Description]];
+ print STDOUT qq[- ] . get_node_link ($opt{node}) .
+ qq[
\n- ], $msg, "
\n";
};
- print STDOUT "#domerrors\n";
- $time1 = time;
+
+ my $elements;
if ($el) {
- Whatpm::ContentChecker->check_element ($el, $onerror);
+ $elements = Whatpm::ContentChecker->check_element ($el, $onerror);
} else {
- Whatpm::ContentChecker->check_document ($doc, $onerror);
+ $elements = Whatpm::ContentChecker->check_document ($doc, $onerror);
+ }
+
+ print STDOUT qq[
+
+];
+
+ if (@{$elements->{table}}) {
+ require JSON;
+
+ push @nav, ['#tables' => 'Tables'];
+ print STDOUT qq[
+
+
Tables
+
+
+
+
+];
+
+ my $i = 0;
+ for my $table_el (@{$elements->{table}}) {
+ $i++;
+ print STDOUT qq[
] .
+ get_node_link ($table_el) . q[
];
+
+ ## TODO: Make |ContentChecker| return |form_table| result
+ ## so that this script doesn't have to run the algorithm twice.
+ my $table = Whatpm::HTMLTable->form_table ($table_el);
+
+ for (@{$table->{column_group}}, @{$table->{column}}, $table->{caption}) {
+ next unless $_;
+ delete $_->{element};
+ }
+
+ for (@{$table->{row_group}}) {
+ next unless $_;
+ next unless $_->{element};
+ $_->{type} = $_->{element}->manakai_local_name;
+ delete $_->{element};
+ }
+
+ for (@{$table->{cell}}) {
+ next unless $_;
+ for (@{$_}) {
+ next unless $_;
+ for (@$_) {
+ $_->{id} = refaddr $_->{element} if defined $_->{element};
+ delete $_->{element};
+ $_->{is_header} = $_->{is_header} ? 1 : 0;
+ }
+ }
+ }
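+ ## The loops above replace DOM node references in the |form_table|
+ ## result with plain data (refaddr-based ids, local names, boolean
+ ## flags), presumably so the structure can be serialized by the JSON
+ ## module required above.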
+
+ print STDOUT '];
+ }
+
+ print STDOUT qq[
];
+ }
+
+ if (keys %{$elements->{id}}) {
+ push @nav, ['#identifiers' => 'IDs'];
+ print STDOUT qq[
+
+
Identifiers
+
+
+];
+ for my $id (sort {$a cmp $b} keys %{$elements->{id}}) {
+ print STDOUT qq[- @{[htescape $id]}
];
+ for (@{$elements->{id}->{$id}}) {
+ print STDOUT qq[- ].get_node_link ($_).qq[
];
+ }
+ }
+ print STDOUT qq[
];
+ }
+
+ if (keys %{$elements->{term}}) {
+ push @nav, ['#terms' => 'Terms'];
+ print STDOUT qq[
+
+
Terms
+
+
+];
+ for my $term (sort {$a cmp $b} keys %{$elements->{term}}) {
+ print STDOUT qq[- @{[htescape $term]}
];
+ for (@{$elements->{term}->{$term}}) {
+ print STDOUT qq[- ].get_node_link ($_).qq[
];
+ }
+ }
+ print STDOUT qq[
];
+ }
+
+ if (keys %{$elements->{class}}) {
+ push @nav, ['#classes' => 'Classes'];
+ print STDOUT qq[
+
+
Classes
+
+
+];
+ for my $class (sort {$a cmp $b} keys %{$elements->{class}}) {
+ print STDOUT qq[- @{[htescape $class]}
];
+ for (@{$elements->{class}->{$class}}) {
+ print STDOUT qq[- ].get_node_link ($_).qq[
];
+ }
+ }
+ print STDOUT qq[
];
}
- $time2 = time;
- $time{check} = $time2 - $time1;
}
- print STDOUT "#log\n";
- for (qw/decode parse parse_xml serialize_html serialize_xml serialize_test
- check/) {
- next unless defined $time{$_};
- print STDOUT {
- decode => 'bytes->chars',
- parse => 'html5(chars)->dom5',
- parse_xml => 'xml1(chars)->dom5',
- serialize_html => 'dom5->html5(char)',
- serialize_xml => 'dom5->xml1(char)',
- serialize_test => 'dom5->test(char)',
- check => 'dom5 check',
- }->{$_};
- print STDOUT "\t", $time{$_}, "s\n";
- open my $file, '>>', ".manakai-$_.txt" or die ".manakai-$_.txt: $!";
- print $file $char_length, "\t", $time{$_}, "\n";
+ ## TODO: Show result
+} else {
+ print STDOUT qq[
+
+
+
+
+
Input Error: @{[htescape ($input->{error_status_text})]}
+
+];
+ push @nav, ['#result-summary' => 'Result'];
+
+}
+
+ print STDOUT qq[
+
+];
+ for (@nav) {
+ print STDOUT qq[- $_->[1]
];
}
+ print STDOUT qq[
+
+
+
+];
exit;
-sub test_serialize ($) {
+sub print_http_header_section ($) {
+ my $input = shift;
+ return unless defined $input->{header_status_code} or
+ defined $input->{header_status_text} or
+ @{$input->{header_field}};
+
+ push @nav, ['#source-header' => 'HTTP Header'];
+ print STDOUT qq[];
+} # print_http_header_section
+
+sub print_source_string_section ($$) {
+ require Encode;
+ my $enc = Encode::find_encoding ($_[1]); ## TODO: charset name -> Perl name
+ return unless $enc;
+
+ my $s = \($enc->decode (${$_[0]}));
+ my $i = 1;
+ push @nav, ['#source-string' => 'Source'];
+ print STDOUT qq[
+
Document Source
+
\n];
+ if (length $$s) {
+ while ($$s =~ /\G([^\x0A]*?)\x0D?\x0A/gc) {
+ print STDOUT qq[- ], htescape $1, "
\n";
+ $i++;
+ }
+ if ($$s =~ /\G([^\x0A]+)/gc) {
+ print STDOUT qq[- ], htescape $1, "
\n";
+ }
+ } else {
+ print STDOUT q[];
+ }
+ print STDOUT "
";
+} # print_source_string_section
+
+sub print_document_tree ($) {
my $node = shift;
- my $r = '';
+ my $r = '';
- my @node = map { [$_, ''] } @{$node->child_nodes};
+ my @node = ($node);
while (@node) {
my $child = shift @node;
- my $nt = $child->[0]->node_type;
- if ($nt == $child->[0]->ELEMENT_NODE) {
- $r .= '| ' . $child->[1] . '<' . $child->[0]->tag_name . ">\x0A"; ## ISSUE: case?
-
- for my $attr (sort {$a->[0] cmp $b->[0]} map { [$_->name, $_->value] }
- @{$child->[0]->attributes}) {
- $r .= '| ' . $child->[1] . ' ' . $attr->[0] . '="'; ## ISSUE: case?
- $r .= $attr->[1] . '"' . "\x0A";
- }
-
- unshift @node,
- map { [$_, $child->[1] . ' '] } @{$child->[0]->child_nodes};
- } elsif ($nt == $child->[0]->TEXT_NODE) {
- $r .= '| ' . $child->[1] . '"' . $child->[0]->data . '"' . "\x0A";
- } elsif ($nt == $child->[0]->CDATA_SECTION_NODE) {
- $r .= '| ' . $child->[1] . '[0]->data . "]]>\x0A";
- } elsif ($nt == $child->[0]->COMMENT_NODE) {
- $r .= '| ' . $child->[1] . '\x0A";
- } elsif ($nt == $child->[0]->DOCUMENT_TYPE_NODE) {
- $r .= '| ' . $child->[1] . '[0]->name . ">\x0A";
- } elsif ($nt == $child->[0]->PROCESSING_INSTRUCTION_NODE) {
- $r .= '| ' . $child->[1] . '' . $child->[0]->target . ' ' .
- $child->[0]->data . "?>\x0A";
+ unless (ref $child) {
+ $r .= $child;
+ next;
+ }
+
+ my $node_id = 'node-'.refaddr $child;
+ my $nt = $child->node_type;
+ if ($nt == $child->ELEMENT_NODE) {
+ my $child_nsuri = $child->namespace_uri;
+ $r .= qq[] . htescape ($child->tag_name) .
+ '
'; ## ISSUE: case
+
+ if ($child->has_attributes) {
+ $r .= '';
+ for my $attr (sort {$a->[0] cmp $b->[0]} map { [$_->name, $_->value, $_->namespace_uri, 'node-'.refaddr $_] }
+ @{$child->attributes}) {
+ $r .= qq[] . htescape ($attr->[0]) . '
= '; ## ISSUE: case?
+ $r .= '' . htescape ($attr->[1]) . '
'; ## TODO: children
+ }
+ $r .= '
';
+ }
+
+ if ($child->has_child_nodes) {
+ $r .= '';
+ unshift @node, @{$child->child_nodes}, '
';
+ } else {
+ $r .= '';
+ }
+ } elsif ($nt == $child->TEXT_NODE) {
+ $r .= qq'' . htescape ($child->data) . '
';
+ } elsif ($nt == $child->CDATA_SECTION_NODE) {
+ $r .= qq'<[CDATA[
' . htescape ($child->data) . '
]]>
';
+ } elsif ($nt == $child->COMMENT_NODE) {
+ $r .= qq'';
+ } elsif ($nt == $child->DOCUMENT_NODE) {
+ $r .= qq'- Document';
+ $r .= qq[
];
+ $r .= qq[- @{[scalar get_text ('manakaiIsHTML:'.($child->manakai_is_html?1:0))]}
];
+ $r .= qq[- @{[scalar get_text ('manakaiCompatMode:'.$child->manakai_compat_mode)]}
];
+ unless ($child->manakai_is_html) {
+ $r .= qq[- XML version =
@{[htescape ($child->xml_version)]}
];
+ if (defined $child->xml_encoding) {
+ $r .= qq[- XML encoding =
@{[htescape ($child->xml_encoding)]}
];
+ } else {
+ $r .= qq[- XML encoding = (null)
];
+ }
+ $r .= qq[- XML standalone = @{[$child->xml_standalone ? 'true' : 'false']}
];
+ }
+ $r .= qq[
];
+ if ($child->has_child_nodes) {
+ $r .= '';
+ unshift @node, @{$child->child_nodes}, '
';
+ }
+ } elsif ($nt == $child->DOCUMENT_TYPE_NODE) {
+ $r .= qq'<!DOCTYPE>
';
+ $r .= qq[- Name =
@{[htescape ($child->name)]}
];
+ $r .= qq[- Public identifier =
@{[htescape ($child->public_id)]}
];
+ $r .= qq[- System identifier =
@{[htescape ($child->system_id)]}
];
+ $r .= '
';
+ } elsif ($nt == $child->PROCESSING_INSTRUCTION_NODE) {
+ $r .= qq'<?@{[htescape ($child->target)]}
@{[htescape ($child->data)]}
?>
';
} else {
- $r .= '| ' . $child->[1] . $child->[0]->node_type . "\x0A"; # error
+ $r .= qq'- @{[$child->node_type]} @{[htescape ($child->node_name)]}
'; # error
}
}
-
- return \$r;
-} # test_serialize
+
+ $r .= '
';
+ print STDOUT $r;
+} # print_document_tree
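+## Note on print_document_tree above: @node is a work list that mixes
+## DOM nodes with literal markup strings; when a node has children, the
+## children are unshifted followed by the node's closing markup, so that
+## markup is emitted only after the whole subtree has been printed.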
sub get_node_path ($) {
my $node = shift;
@@ -212,6 +501,7 @@
$rs = '"' . $node->data . '"';
$node = $node->parent_node;
} elsif ($node->node_type == 9) {
+ @r = ('') unless @r;
$rs = '';
$node = $node->parent_node;
} else {
@@ -223,6 +513,196 @@
return join '/', @r;
} # get_node_path
+sub get_node_link ($) {
+ return qq[] .
+ htescape (get_node_path ($_[0])) . qq[];
+} # get_node_link
+
+{
+ my $Msg = {};
+
+sub load_text_catalog ($) {
+ my $lang = shift; # MUST be a canonical lang name
+ open my $file, '<', "cc-msg.$lang.txt" or die "$0: cc-msg.$lang.txt: $!";
+ while (<$file>) {
+ if (s/^([^;]+);([^;]*);//) {
+ my ($type, $cls, $msg) = ($1, $2, $_);
+ $msg =~ tr/\x0D\x0A//d;
+ $Msg->{$type} = [$cls, $msg];
+ }
+ }
+} # load_text_catalog
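+## Each line of cc-msg.$lang.txt is expected to follow the form implied
+## by the regexp above, roughly:
+##   error-type;class;Message text with optional $0, $1, ... placeholders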
+
+sub get_text ($;$) {
+ my ($type, $level) = @_;
+ $type = $level . ':' . $type if defined $level;
+ my @arg;
+ {
+ if (defined $Msg->{$type}) {
+ my $msg = $Msg->{$type}->[1];
+ $msg =~ s{\$([0-9]+)}{
+ defined $arg[$1] ? htescape ($arg[$1]) : '(undef)';
+ }ge;
+ return ($type, $Msg->{$type}->[0], $msg);
+ } elsif ($type =~ s/:([^:]*)$//) {
+ unshift @arg, $1;
+ redo;
+ }
+ }
+ return ($type, '', htescape ($_[0]));
+} # get_text
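+## Lookup falls back by stripping trailing ":"-separated segments and
+## collecting them as arguments; e.g. (illustrative) a type
+## "unsupported:foo" with no catalog entry is retried as "unsupported"
+## with @arg = ('foo'), so "$0" in the catalog message expands to "foo".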
+
+}
+
+sub get_input_document ($$) {
+ my ($http, $dom) = @_;
+
+ my $request_uri = $http->parameter ('uri');
+ my $r = {};
+ if (defined $request_uri and length $request_uri) {
+ my $uri = $dom->create_uri_reference ($request_uri);
+ unless ({
+ http => 1,
+ }->{lc $uri->uri_scheme}) {
+ return {uri => $request_uri, request_uri => $request_uri,
+ error_status_text => 'URI scheme not allowed'};
+ }
+
+ require Message::Util::HostPermit;
+ my $host_permit = new Message::Util::HostPermit;
+ $host_permit->add_rule (<check ($uri->uri_host, $uri->uri_port || 80)) {
+ return {uri => $request_uri, request_uri => $request_uri,
+ error_status_text => 'Connection to the host is forbidden'};
+ }
+
+ require LWP::UserAgent;
+ my $ua = WDCC::LWPUA->new;
+ $ua->{wdcc_dom} = $dom;
+ $ua->{wdcc_host_permit} = $host_permit;
+ $ua->agent ('Mozilla'); ## TODO: for now.
+ $ua->parse_head (0);
+ $ua->protocols_allowed ([qw/http/]);
+ $ua->max_size (1000_000);
+ my $req = HTTP::Request->new (GET => $request_uri);
+ my $res = $ua->request ($req);
+ if ($res->is_success or $http->parameter ('error-page')) {
+ $r->{base_uri} = $res->base; ## NOTE: It does check |Content-Base|, |Content-Location|, and <base>. ## TODO: Use our own code!
+ $r->{uri} = $res->request->uri;
+ $r->{request_uri} = $request_uri;
+
+ ## TODO: More strict parsing...
+ my $ct = $res->header ('Content-Type');
+ if (defined $ct and $ct =~ m#^([0-9A-Za-z._+-]+/[0-9A-Za-z._+-]+)#) {
+ $r->{media_type} = lc $1;
+ }
+ if (defined $ct and $ct =~ /;\s*charset\s*=\s*"?(\S+)"?/i) {
+ $r->{charset} = lc $1;
+ $r->{charset} =~ tr/\\//d;
+ }
+
+ my $input_charset = $http->parameter ('charset');
+ if (defined $input_charset and length $input_charset) {
+ $r->{charset_overridden}
+ = (not defined $r->{charset} or $r->{charset} ne $input_charset);
+ $r->{charset} = $input_charset;
+ }
+
+ $r->{s} = ''.$res->content;
+ } else {
+ $r->{uri} = $res->request->uri;
+ $r->{request_uri} = $request_uri;
+ $r->{error_status_text} = $res->status_line;
+ }
+
+ $r->{header_field} = [];
+ $res->scan (sub {
+ push @{$r->{header_field}}, [$_[0], $_[1]];
+ });
+ $r->{header_status_code} = $res->code;
+ $r->{header_status_text} = $res->message;
+ } else {
+ $r->{s} = ''.$http->parameter ('s');
+ $r->{uri} = q;
+ $r->{request_uri} = q;
+ $r->{base_uri} = q;
+ $r->{charset} = ''.$http->parameter ('_charset_');
+ $r->{charset} =~ s/\s+//g;
+ $r->{charset} = 'utf-8' if $r->{charset} eq '';
+ $r->{header_field} = [];
+ }
+
+ my $input_format = $http->parameter ('i');
+ if (defined $input_format and length $input_format) {
+ $r->{media_type_overridden}
+ = (not defined $r->{media_type} or $input_format ne $r->{media_type});
+ $r->{media_type} = $input_format;
+ }
+ if (defined $r->{s} and not defined $r->{media_type}) {
+ $r->{media_type} = 'text/html';
+ $r->{media_type_overridden} = 1;
+ }
+
+ if ($r->{media_type} eq 'text/xml') {
+ unless (defined $r->{charset}) {
+ $r->{charset} = 'us-ascii';
+ } elsif ($r->{charset_overridden} and $r->{charset} eq 'us-ascii') {
+ $r->{charset_overridden} = 0;
+ }
+ }
+
+ if (length $r->{s} > 1000_000) {
+ $r->{error_status_text} = 'Entity-body too large';
+ delete $r->{s};
+ return $r;
+ }
+
+ return $r;
+} # get_input_document
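+## Rough sketch of how the result of get_input_document is consumed by
+## the main code above (keys as set in this sub):
+##   my $input = get_input_document ($http, $dom);
+##   if (defined $input->{s}) {
+##     # parse $input->{s} according to $input->{media_type} / {charset}
+##   } else {
+##     # report $input->{error_status_text}
+##   }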
+
+package WDCC::LWPUA;
+BEGIN { push our @ISA, 'LWP::UserAgent'; }
+
+sub redirect_ok {
+ my $ua = shift;
+ unless ($ua->SUPER::redirect_ok (@_)) {
+ return 0;
+ }
+
+ my $uris = $_[1]->header ('Location');
+ return 0 unless $uris;
+ my $uri = $ua->{wdcc_dom}->create_uri_reference ($uris);
+ unless ({
+ http => 1,
+ }->{lc $uri->uri_scheme}) {
+ return 0;
+ }
+ unless ($ua->{wdcc_host_permit}->check ($uri->uri_host, $uri->uri_port || 80)) {
+ return 0;
+ }
+ return 1;
+} # redirect_ok
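+## redirect_ok above re-applies the scheme and host-permission checks to
+## each Location target, so a redirect cannot lead the checker to a host
+## or URI scheme that the original request would not have been allowed
+## to fetch.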
+
=head1 AUTHOR
Wakaba .
@@ -236,4 +716,4 @@
=cut
-## $Date: 2007/06/27 11:08:03 $
+## $Date: 2007/07/17 14:28:20 $