--- test/html-webhacc/cc.cgi 2007/07/17 14:28:20 1.14 +++ test/html-webhacc/cc.cgi 2008/07/20 16:53:10 1.54 @@ -1,45 +1,36 @@ #!/usr/bin/perl use strict; +use utf8; use lib qw[/home/httpd/html/www/markup/html/whatpm - /home/wakaba/work/manakai/lib - /home/wakaba/public_html/-temp/wiki/lib]; + /home/wakaba/work/manakai2/lib]; use CGI::Carp qw[fatalsToBrowser]; use Scalar::Util qw[refaddr]; -use SuikaWiki::Input::HTTP; ## TODO: Use some better CGI module + require WebHACC::Input; + require WebHACC::Result; + require WebHACC::Output; -sub htescape ($) { - my $s = $_[0]; - $s =~ s/&/&amp;/g; - $s =~ s/</&lt;/g; - $s =~ s/>/&gt;/g; - $s =~ s/"/&quot;/g; - $s =~ s{([\x00-\x09\x0B-\x1F\x7F-\xA0\x{FEFF}\x{FFFC}-\x{FFFF}])}{ - sprintf 'U+%04X', ord $1; - }ge; - return $s; -} # htescape +my $out; -my $http = SuikaWiki::Input::HTTP->new; - -## TODO: _charset_ + require Message::DOM::DOMImplementation; + my $dom = Message::DOM::DOMImplementation->new; +{ + use Message::CGI::HTTP; + my $http = Message::CGI::HTTP->new; - if ($http->meta_variable ('PATH_INFO') ne '/') { + if ($http->get_meta_variable ('PATH_INFO') ne '/') { print STDOUT "Status: 404 Not Found\nContent-Type: text/plain; charset=us-ascii\n\n400"; exit; } - - binmode STDOUT, ':utf8'; - $| = 1; - - require Message::DOM::DOMImplementation; - my $dom = Message::DOM::DOMImplementation->new; - + load_text_catalog ('en'); ## TODO: conneg - my @nav; - print STDOUT qq[Content-Type: text/html; charset=utf-8 + $out = WebHACC::Output->new; + $out->handle (*STDOUT); + $out->set_utf8; + $out->set_flush; + $out->html (qq[Content-Type: text/html; charset=utf-8 @@ -50,175 +41,199 @@

Web Document Conformance Checker (beta)

-]; +]); - $| = 0; my $input = get_input_document ($http, $dom); - my $inner_html_element = $http->parameter ('e'); - - print qq[ -
-
-
Request URI
-
<@{[htescape $input->{request_uri}]}>
-
Document URI
-
<@{[htescape $input->{uri}]}>
-]; # no </dl> yet
- push @nav, ['#document-info' => 'Information']; + $out->input ($input); + $out->unset_flush; -if (defined $input->{s}) { + my $char_length = 0; - print STDOUT qq[ -
Base URI
-
<@{[htescape $input->{base_uri}]}>
-
Internet Media Type
-
@{[htescape $input->{media_type}]} - @{[$input->{media_type_overridden} ? '(overridden)' : '']}
-
Character Encoding
-
@{[defined $input->{charset} ? ''.htescape ($input->{charset}).'' : '(none)']} - @{[$input->{charset_overridden} ? '(overridden)' : '']}
+ $out->start_section (id => 'document-info', title => 'Information'); + $out->html (qq[
+
Request URL
+
]); + $out->url ($input->{request_uri}); + $out->html (q[
Document URL +
]); + $out->url ($input->{uri}, id => 'anchor-document-url'); + $out->html (q[ + ]); + ## NOTE: no </dl> yet
+ + if (defined $input->{s}) { + $char_length = length $input->{s}; + + $out->html (qq[
Base URI
]); + $out->url ($input->{base_uri}); + $out->html (qq[
Internet Media Type
+
]); + $out->text ($input->{media_type}); + $out->html (qq[ ]); + if ($input->{media_type_overridden}) { + $out->html ('(overridden)'); + } elsif (defined $input->{official_type}) { + if ($input->{media_type} eq $input->{official_type}) { + # + } else { + $out->html ('(sniffed; official type is: '); + $out->text ($input->{official_type}); + $out->html (')'); + } + } else { + $out->html ('(sniffed)'); + } + $out->html (q[
Character Encoding
]); + if (defined $input->{charset}) { + $out->html (''); + $out->text ($input->{charset}); + $out->html (''); + } else { + $out->text ('(none)'); + } + $out->html (' overridden') if $input->{charset_overridden}; + $out->html (qq[ +
Length
+
$char_length byte@{[$char_length == 1 ? '' : 's']}
-
-]; - - print_http_header_section ($input); - - my $doc; - my $el; - - if ($input->{media_type} eq 'text/html') { - require Encode; - require Whatpm::HTML; - $input->{charset} ||= 'ISO-8859-1'; ## TODO: for now. - - my $t = Encode::decode ($input->{charset}, $input->{s}); - - print STDOUT qq[ -
-

Parse Errors

+ +]); + $out->end_section; + + my $result = WebHACC::Result->new; + $result->output ($out); + $result->{conforming_min} = 1; + $result->{conforming_max} = 1; + check_and_print ($input => $result => $out); + $result->generate_result_section; + } else { + $out->html (''); + $out->end_section; -
]; - push @nav, ['#parse-errors' => 'Parse Error']; + my $result = WebHACC::Result->new; + $result->output ($out); + $result->{conforming_min} = 0; + $result->{conforming_max} = 1; - my $onerror = sub { - my (%opt) = @_; - my ($type, $cls, $msg) = get_text ($opt{type}, $opt{level}); - if ($opt{column} > 0) { - print STDOUT qq[
Line $opt{line} column $opt{column}
\n]; - } else { - $opt{line} = $opt{line} - 1 || 1; - print STDOUT qq[
Line $opt{line}
\n]; - } - $type =~ tr/ /-/; - $type =~ s/\|/%7C/g; - $msg .= qq[ [Description]]; - print STDOUT qq[
$msg
\n]; - }; - - $doc = $dom->create_document; - if (defined $inner_html_element and length $inner_html_element) { - $el = $doc->create_element_ns - ('http://www.w3.org/1999/xhtml', [undef, $inner_html_element]); - Whatpm::HTML->set_inner_html ($el, $t, $onerror); - } else { - Whatpm::HTML->parse_string ($t => $doc, $onerror); + $input->generate_transfer_sections ($result); + $result->generate_result_section; } - print STDOUT qq[
-
-]; + $out->nav_list; - print_source_string_section (\($input->{s}), $input->{charset}); - } elsif ({ - 'text/xml' => 1, - 'application/xhtml+xml' => 1, - 'application/xml' => 1, - }->{$input->{media_type}}) { - require Message::DOM::XMLParserTemp; - - print STDOUT qq[ -
-

Parse Errors

+ exit; +} -
]; - push @nav, ['#parse-errors' => 'Parse Error']; +sub check_and_print ($$$) { + my ($input, $result, $out) = @_; + my $original_input = $out->input; + $out->input ($input); + + $input->generate_transfer_sections ($result); + + my @subdoc; + + my $checker_class = { + 'text/cache-manifest' => 'WebHACC::Language::CacheManifest', + 'text/css' => 'WebHACC::Language::CSS', + 'text/html' => 'WebHACC::Language::HTML', + 'text/x-webidl' => 'WebHACC::Language::WebIDL', + + 'text/xml' => 'WebHACC::Language::XML', + 'application/atom+xml' => 'WebHACC::Language::XML', + 'application/rss+xml' => 'WebHACC::Language::XML', + 'image/svg+xml' => 'WebHACC::Language::XML', + 'application/xhtml+xml' => 'WebHACC::Language::XML', + 'application/xml' => 'WebHACC::Language::XML', + ## TODO: Should we make all XML MIME Types fall + ## into this category? + + ## NOTE: This type has different model from normal XML types. + 'application/rdf+xml' => 'WebHACC::Language::XML', + }->{$input->{media_type}} || 'WebHACC::Language::Default'; + + eval qq{ require $checker_class } or die "$0: Loading $checker_class: $@"; + my $checker = $checker_class->new; + $checker->input ($input); + $checker->output ($out); + $checker->result ($result); + + ## TODO: A cache manifest MUST be text/cache-manifest + ## TODO: WebIDL media type "text/x-webidl" + + $checker->generate_syntax_error_section; + $checker->generate_source_string_section; + + $checker->onsubdoc (sub { + push @subdoc, shift; + }); + + $checker->generate_structure_dump_section; + $checker->generate_structure_error_section; + $checker->generate_additional_sections; - my $onerror = sub { - my $err = shift; - my $line = $err->location->line_number; - print STDOUT qq[
Line $line column ]; - print STDOUT $err->location->column_number, "
"; - print STDOUT htescape $err->text, "
\n"; - return 1; - }; - - open my $fh, '<', \($input->{s}); - $doc = Message::DOM::XMLParserTemp->parse_byte_stream - ($fh => $dom, $onerror, charset => $input->{charset}); +=pod - print STDOUT qq[
-
+ if (defined $doc or defined $el) { -]; - print_source_string_section (\($input->{s}), $doc->input_encoding); - } else { - ## TODO: Change HTTP status code?? - print STDOUT qq[ -
-

Media type @{[htescape $input->{media_type}]} is not supported!

-
-]; - push @nav, ['#result-summary' => 'Result']; + print_table_section ($input, $elements->{table}) if @{$elements->{table}}; + print_listing_section ({ + id => 'identifiers', label => 'IDs', heading => 'Identifiers', + }, $input, $elements->{id}) if keys %{$elements->{id}}; + print_listing_section ({ + id => 'terms', label => 'Terms', heading => 'Terms', + }, $input, $elements->{term}) if keys %{$elements->{term}}; + print_listing_section ({ + id => 'classes', label => 'Classes', heading => 'Classes', + }, $input, $elements->{class}) if keys %{$elements->{class}}; + + print_rdf_section ($input, $elements->{rdf}) if @{$elements->{rdf}}; } +=cut - if (defined $doc or defined $el) { - print STDOUT qq[ -
-

Document Tree

-]; - push @nav, ['#document-tree' => 'Tree']; - - print_document_tree ($el || $doc); - + my $id_prefix = 0; + for my $_subinput (@subdoc) { + my $subinput = WebHACC::Input->new; + $subinput->{$_} = $_subinput->{$_} for keys %$_subinput; + $subinput->id_prefix ('subdoc-' . ++$id_prefix); + $subinput->nested (1); + $subinput->{base_uri} = $subinput->{container_node}->base_uri + unless defined $subinput->{base_uri}; + my $ebaseuri = htescape ($subinput->{base_uri}); + $out->start_section (id => $subinput->id_prefix, + title => qq[Subdocument #$id_prefix]); print STDOUT qq[ -
- -
-

Document Errors

- -
]; - push @nav, ['#document-errors' => 'Document Error']; +
+
Internet Media Type
+
@{[htescape $subinput->{media_type}]} +
Container Node
+
@{[get_node_link ($input, $subinput->{container_node})]}
+
Base URI
+
<$ebaseuri>
+
]; + + $subinput->{id_prefix} .= '-'; + check_and_print ($subinput => $result => $out); - require Whatpm::ContentChecker; - my $onerror = sub { - my %opt = @_; - my ($type, $cls, $msg) = get_text ($opt{type}, $opt{level}); - $type =~ tr/ /-/; - $type =~ s/\|/%7C/g; - $msg .= qq[ [Description]]; - print STDOUT qq[
] . get_node_link ($opt{node}) . - qq[
\n
], $msg, "
\n"; - }; - - my $elements; - if ($el) { - $elements = Whatpm::ContentChecker->check_element ($el, $onerror); - } else { - $elements = Whatpm::ContentChecker->check_document ($doc, $onerror); - } - - print STDOUT qq[
-
-]; + $out->end_section; + } - if (@{$elements->{table}}) { - require JSON; + $out->input ($original_input); +} # check_and_print - push @nav, ['#tables' => 'Tables']; - print STDOUT qq[ -
+sub print_table_section ($$) { + my ($input, $tables) = @_; + +# push @nav, [qq[#$input->{id_prefix}tables] => 'Tables'] +# unless $input->{nested}; + print STDOUT qq[ +

Tables

@@ -227,303 +242,135 @@

Structure of tables is visualized here if scripting is enabled.

]; - - my $i = 0; - for my $table_el (@{$elements->{table}}) { - $i++; - print STDOUT qq[

] . - get_node_link ($table_el) . q[

]; - - ## TODO: Make |ContentChecker| return |form_table| result - ## so that this script don't have to run the algorithm twice. - my $table = Whatpm::HTMLTable->form_table ($table_el); - - for (@{$table->{column_group}}, @{$table->{column}}, $table->{caption}) { - next unless $_; - delete $_->{element}; - } - - for (@{$table->{row_group}}) { - next unless $_; - next unless $_->{element}; - $_->{type} = $_->{element}->manakai_local_name; - delete $_->{element}; - } - - for (@{$table->{cell}}) { - next unless $_; - for (@{$_}) { - next unless $_; - for (@$_) { - $_->{id} = refaddr $_->{element} if defined $_->{element}; - delete $_->{element}; - $_->{is_header} = $_->{is_header} ? 1 : 0; - } - } - } - - print STDOUT '
]; - } - - print STDOUT qq[
]; - } - - if (keys %{$elements->{id}}) { - push @nav, ['#identifiers' => 'IDs']; - print STDOUT qq[ -
-

Identifiers

- -
-]; - for my $id (sort {$a cmp $b} keys %{$elements->{id}}) { - print STDOUT qq[
@{[htescape $id]}
]; - for (@{$elements->{id}->{$id}}) { - print STDOUT qq[
].get_node_link ($_).qq[
]; - } - } - print STDOUT qq[
]; + + require JSON; + + my $i = 0; + for my $table (@$tables) { + $i++; + print STDOUT qq[

] . + get_node_link ($input, $table->{element}) . q[

]; + + delete $table->{element}; + + for (@{$table->{column_group}}, @{$table->{column}}, $table->{caption}, + @{$table->{row}}) { + next unless $_; + delete $_->{element}; } - - if (keys %{$elements->{term}}) { - push @nav, ['#terms' => 'Terms']; - print STDOUT qq[ -
-

Terms

- -
-]; - for my $term (sort {$a cmp $b} keys %{$elements->{term}}) { - print STDOUT qq[
@{[htescape $term]}
]; - for (@{$elements->{term}->{$term}}) { - print STDOUT qq[
].get_node_link ($_).qq[
]; - } - } - print STDOUT qq[
]; + + for (@{$table->{row_group}}) { + next unless $_; + next unless $_->{element}; + $_->{type} = $_->{element}->manakai_local_name; + delete $_->{element}; } - - if (keys %{$elements->{class}}) { - push @nav, ['#classes' => 'Classes']; - print STDOUT qq[ -
-

Classes

- -
-]; - for my $class (sort {$a cmp $b} keys %{$elements->{class}}) { - print STDOUT qq[
@{[htescape $class]}
]; - for (@{$elements->{class}->{$class}}) { - print STDOUT qq[
].get_node_link ($_).qq[
]; + + for (@{$table->{cell}}) { + next unless $_; + for (@{$_}) { + next unless $_; + for (@$_) { + $_->{id} = refaddr $_->{element} if defined $_->{element}; + delete $_->{element}; + $_->{is_header} = $_->{is_header} ? 1 : 0; } } - print STDOUT qq[
]; } + + print STDOUT '
]; } + + print STDOUT qq[
]; +} # print_table_section - ## TODO: Show result -} else { +sub print_listing_section ($$$) { + my ($opt, $input, $ids) = @_; + +# push @nav, ['#' . $input->{id_prefix} . $opt->{id} => $opt->{label}] +# unless $input->{nested}; print STDOUT qq[ - - - -
-

Input Error: @{[htescape ($input->{error_status_text})]}

-
-]; - push @nav, ['#result-summary' => 'Result']; - -} +
+

$opt->{heading}

- print STDOUT qq[ - - - -]; + print STDOUT qq[
]; +} # print_listing_section -exit; - -sub print_http_header_section ($) { - my $input = shift; - return unless defined $input->{header_status_code} or - defined $input->{header_status_text} or - @{$input->{header_field}}; - - push @nav, ['#source-header' => 'HTTP Header']; - print STDOUT qq[
-

HTTP Header

- -

Note: Due to the limitation of the -network library in use, the content of this section might -not be the real header.

- - -]; - if (defined $input->{header_status_code}) { - print STDOUT qq[]; - print STDOUT qq[]; - } - if (defined $input->{header_status_text}) { - print STDOUT qq[]; - print STDOUT qq[]; - } +sub print_rdf_section ($$$) { + my ($input, $rdfs) = @_; - for (@{$input->{header_field}}) { - print STDOUT qq[]; - print STDOUT qq[]; - } - - print STDOUT qq[
Status code@{[htescape ($input->{header_status_code})]}
Status text@{[htescape ($input->{header_status_text})]}
@{[htescape ($_->[0])]}@{[htescape ($_->[1])]}
]; -} # print_http_header_section - -sub print_source_string_section ($$) { - require Encode; - my $enc = Encode::find_encoding ($_[1]); ## TODO: charset name -> Perl name - return unless $enc; +# push @nav, ['#' . $input->{id_prefix} . 'rdf' => 'RDF'] +# unless $input->{nested}; + print STDOUT qq[ +
+

RDF Triples

- my $s = \($enc->decode (${$_[0]})); - my $i = 1; - push @nav, ['#source-string' => 'Source']; - print STDOUT qq[
-

Document Source

-
    \n]; - if (length $$s) { - while ($$s =~ /\G([^\x0A]*?)\x0D?\x0A/gc) { - print STDOUT qq[
  1. ], htescape $1, "
  2. \n"; - $i++; - } - if ($$s =~ /\G([^\x0A]+)/gc) { - print STDOUT qq[
  3. ], htescape $1, "
  4. \n"; +
    ]; + my $i = 0; + for my $rdf (@$rdfs) { + print STDOUT qq[
    ]; + print STDOUT get_node_link ($input, $rdf->[0]); + print STDOUT qq[
    ]; + for my $triple (@{$rdf->[1]}) { + print STDOUT '
    ' . get_node_link ($input, $triple->[0]) . '
    '; + print STDOUT get_rdf_resource_html ($triple->[1]); + print STDOUT ' '; + print STDOUT get_rdf_resource_html ($triple->[2]); + print STDOUT ' '; + print STDOUT get_rdf_resource_html ($triple->[3]); + } + print STDOUT qq[
    ]; + } + print STDOUT qq[
]; +} # print_rdf_section + +sub get_rdf_resource_html ($) { + my $resource = shift; + if (defined $resource->{uri}) { + my $euri = htescape ($resource->{uri}); + return '<' . $euri . + '>'; + } elsif (defined $resource->{bnodeid}) { + return htescape ('_:' . $resource->{bnodeid}); + } elsif ($resource->{nodes}) { + return '(rdf:XMLLiteral)'; + } elsif (defined $resource->{value}) { + my $elang = htescape (defined $resource->{language} + ? $resource->{language} : ''); + my $r = qq[] . htescape ($resource->{value}) . ''; + if (defined $resource->{datatype}) { + my $euri = htescape ($resource->{datatype}); + $r .= '^^<' . $euri . + '>'; + } elsif (length $resource->{language}) { + $r .= '@' . htescape ($resource->{language}); } + return $r; } else { - print STDOUT q[
  • ]; - } - print STDOUT "
    "; -} # print_input_string_section - -sub print_document_tree ($) { - my $node = shift; - my $r = '
      '; - - my @node = ($node); - while (@node) { - my $child = shift @node; - unless (ref $child) { - $r .= $child; - next; - } - - my $node_id = 'node-'.refaddr $child; - my $nt = $child->node_type; - if ($nt == $child->ELEMENT_NODE) { - my $child_nsuri = $child->namespace_uri; - $r .= qq[
    1. ] . htescape ($child->tag_name) . - ''; ## ISSUE: case - - if ($child->has_attributes) { - $r .= '
        '; - for my $attr (sort {$a->[0] cmp $b->[0]} map { [$_->name, $_->value, $_->namespace_uri, 'node-'.refaddr $_] } - @{$child->attributes}) { - $r .= qq[
      • ] . htescape ($attr->[0]) . ' = '; ## ISSUE: case? - $r .= '' . htescape ($attr->[1]) . '
      • '; ## TODO: children - } - $r .= '
      '; - } - - if ($child->has_child_nodes) { - $r .= '
        '; - unshift @node, @{$child->child_nodes}, '
    2. '; - } else { - $r .= ''; - } - } elsif ($nt == $child->TEXT_NODE) { - $r .= qq'
    3. ' . htescape ($child->data) . '
    4. '; - } elsif ($nt == $child->CDATA_SECTION_NODE) { - $r .= qq'
    5. <[CDATA[' . htescape ($child->data) . ']]>
    6. '; - } elsif ($nt == $child->COMMENT_NODE) { - $r .= qq'
    7. <!--' . htescape ($child->data) . '-->
    8. '; - } elsif ($nt == $child->DOCUMENT_NODE) { - $r .= qq'
    9. Document'; - $r .= qq[
        ]; - $r .= qq[
      • @{[scalar get_text ('manakaiIsHTML:'.($child->manakai_is_html?1:0))]}
      • ]; - $r .= qq[
      • @{[scalar get_text ('manakaiCompatMode:'.$child->manakai_compat_mode)]}
      • ]; - unless ($child->manakai_is_html) { - $r .= qq[
      • XML version = @{[htescape ($child->xml_version)]}
      • ]; - if (defined $child->xml_encoding) { - $r .= qq[
      • XML encoding = @{[htescape ($child->xml_encoding)]}
      • ]; - } else { - $r .= qq[
      • XML encoding = (null)
      • ]; - } - $r .= qq[
      • XML standalone = @{[$child->xml_standalone ? 'true' : 'false']}
      • ]; - } - $r .= qq[
      ]; - if ($child->has_child_nodes) { - $r .= '
        '; - unshift @node, @{$child->child_nodes}, '
    10. '; - } - } elsif ($nt == $child->DOCUMENT_TYPE_NODE) { - $r .= qq'
    11. <!DOCTYPE>
        '; - $r .= qq[
      • Name = @{[htescape ($child->name)]}
      • ]; - $r .= qq[
      • Public identifier = @{[htescape ($child->public_id)]}
      • ]; - $r .= qq[
      • System identifier = @{[htescape ($child->system_id)]}
      • ]; - $r .= '
    12. '; - } elsif ($nt == $child->PROCESSING_INSTRUCTION_NODE) { - $r .= qq'
    13. <?@{[htescape ($child->target)]} @{[htescape ($child->data)]}?>
    14. '; - } else { - $r .= qq'
    15. @{[$child->node_type]} @{[htescape ($child->node_name)]}
    16. '; # error - } + return '??'; } - - $r .= '
    '; - print STDOUT $r; -} # print_document_tree - -sub get_node_path ($) { - my $node = shift; - my @r; - while (defined $node) { - my $rs; - if ($node->node_type == 1) { - $rs = $node->manakai_local_name; - $node = $node->parent_node; - } elsif ($node->node_type == 2) { - $rs = '@' . $node->manakai_local_name; - $node = $node->owner_element; - } elsif ($node->node_type == 3) { - $rs = '"' . $node->data . '"'; - $node = $node->parent_node; - } elsif ($node->node_type == 9) { - @r = ('') unless @r; - $rs = ''; - $node = $node->parent_node; - } else { - $rs = '#' . $node->node_type; - $node = $node->parent_node; - } - unshift @r, $rs; - } - return join '/', @r; -} # get_node_path - -sub get_node_link ($) { - return qq[] . - htescape (get_node_path ($_[0])) . qq[]; -} # get_node_link +} # get_rdf_resource_html { my $Msg = {}; sub load_text_catalog ($) { +# my $self = shift; my $lang = shift; # MUST be a canonical lang name - open my $file, '<', "cc-msg.$lang.txt" or die "$0: cc-msg.$lang.txt: $!"; + open my $file, '<:utf8', "cc-msg.$lang.txt" + or die "$0: cc-msg.$lang.txt: $!"; while (<$file>) { if (s/^([^;]+);([^;]*);//) { my ($type, $cls, $msg) = ($1, $2, $_); @@ -533,23 +380,43 @@ } } # load_text_catalog -sub get_text ($) { - my ($type, $level) = @_; +sub get_text ($;$$) { +# my $self = shift; + my ($type, $level, $node) = @_; $type = $level . ':' . $type if defined $level; + $level = 'm' unless defined $level; my @arg; { if (defined $Msg->{$type}) { my $msg = $Msg->{$type}->[1]; $msg =~ s{\$([0-9]+)}{ - defined $arg[$1] ? htescape ($arg[$1]) : '(undef)'; + defined $arg[$1] ? ($arg[$1]) : '(undef)'; + }ge; ##BUG: ^ must be escaped + $msg =~ s{{\@([A-Za-z0-9:_.-]+)}}{ + UNIVERSAL::can ($node, 'get_attribute_ns') + ? ($node->get_attribute_ns (undef, $1)) : '' + }ge; ## BUG: ^ must be escaped + $msg =~ s{{\@}}{ ## BUG: v must be escaped + UNIVERSAL::can ($node, 'value') ? ($node->value) : '' + }ge; + $msg =~ s{{local-name}}{ + UNIVERSAL::can ($node, 'manakai_local_name') + ? ($node->manakai_local_name) : '' + }ge; ## BUG: ^ must be escaped + $msg =~ s{{element-local-name}}{ + (UNIVERSAL::can ($node, 'owner_element') and + $node->owner_element) + ? ($node->owner_element->manakai_local_name) + : '' ## BUG: ^ must be escaped }ge; - return ($type, $Msg->{$type}->[0], $msg); + return ($type, 'level-' . $level . ' ' . 
$Msg->{$type}->[0], $msg); } elsif ($type =~ s/:([^:]*)$//) { unshift @arg, $1; redo; } } - return ($type, '', htescape ($_[0])); + return ($type, 'level-'.$level, ($_[0])); + ## BUG: ^ must be escaped } # get_text } @@ -557,15 +424,18 @@ sub get_input_document ($$) { my ($http, $dom) = @_; - my $request_uri = $http->parameter ('uri'); - my $r = {}; + require Encode; + my $request_uri = Encode::decode ('utf-8', $http->get_parameter ('uri')); + my $r = WebHACC::Input->new; if (defined $request_uri and length $request_uri) { my $uri = $dom->create_uri_reference ($request_uri); unless ({ http => 1, }->{lc $uri->uri_scheme}) { - return {uri => $request_uri, request_uri => $request_uri, - error_status_text => 'URI scheme not allowed'}; + $r = WebHACC::Input::Error->new; + $r->{uri} = $request_uri; + $r->{request_uri} = $request_uri; + $r->{error_status_text} = 'URL scheme not allowed'; } require Message::Util::HostPermit; @@ -592,8 +462,11 @@ Allow host=* EOH unless ($host_permit->check ($uri->uri_host, $uri->uri_port || 80)) { - return {uri => $request_uri, request_uri => $request_uri, - error_status_text => 'Connection to the host is forbidden'}; + my $r = WebHACC::Input::Error->new; + $r->{uri} = $request_uri; + $r->{request_uri} = $request_uri; + $r->{error_status_text} = 'Connection to the host is forbidden'; + return $r; } require LWP::UserAgent; @@ -605,30 +478,43 @@ $ua->protocols_allowed ([qw/http/]); $ua->max_size (1000_000); my $req = HTTP::Request->new (GET => $request_uri); + $req->header ('Accept-Encoding' => 'identity, *; q=0'); my $res = $ua->request ($req); - if ($res->is_success or $http->parameter ('error-page')) { + ## TODO: 401 sets |is_success| true. + if ($res->is_success or $http->get_parameter ('error-page')) { $r->{base_uri} = $res->base; ## NOTE: It does check |Content-Base|, |Content-Location|, and . ## TODO: Use our own code! $r->{uri} = $res->request->uri; $r->{request_uri} = $request_uri; ## TODO: More strict parsing... 
my $ct = $res->header ('Content-Type'); - if (defined $ct and $ct =~ m#^([0-9A-Za-z._+-]+/[0-9A-Za-z._+-]+)#) { - $r->{media_type} = lc $1; - } - if (defined $ct and $ct =~ /;\s*charset\s*=\s*"?(\S+)"?/i) { + if (defined $ct and $ct =~ /;\s*charset\s*=\s*"?([^\s;"]+)"?/i) { $r->{charset} = lc $1; $r->{charset} =~ tr/\\//d; + $r->{official_charset} = $r->{charset}; } - my $input_charset = $http->parameter ('charset'); + my $input_charset = $http->get_parameter ('charset'); if (defined $input_charset and length $input_charset) { $r->{charset_overridden} = (not defined $r->{charset} or $r->{charset} ne $input_charset); $r->{charset} = $input_charset; - } + } + + ## TODO: Support for HTTP Content-Encoding $r->{s} = ''.$res->content; + + require Whatpm::ContentType; + ($r->{official_type}, $r->{media_type}) + = Whatpm::ContentType->get_sniffed_type + (get_file_head => sub { + return substr $r->{s}, 0, shift; + }, + http_content_type_byte => $ct, + has_http_content_encoding => + defined $res->header ('Content-Encoding'), + supported_image_types => {}); } else { $r->{uri} = $res->request->uri; $r->{request_uri} = $request_uri; @@ -642,17 +528,28 @@ $r->{header_status_code} = $res->code; $r->{header_status_text} = $res->message; } else { - $r->{s} = ''.$http->parameter ('s'); + $r->{s} = ''.$http->get_parameter ('s'); $r->{uri} = q; $r->{request_uri} = q; $r->{base_uri} = q; - $r->{charset} = ''.$http->parameter ('_charset_'); + $r->{charset} = ''.$http->get_parameter ('_charset_'); $r->{charset} =~ s/\s+//g; $r->{charset} = 'utf-8' if $r->{charset} eq ''; + $r->{official_charset} = $r->{charset}; $r->{header_field} = []; + + require Whatpm::ContentType; + ($r->{official_type}, $r->{media_type}) + = Whatpm::ContentType->get_sniffed_type + (get_file_head => sub { + return substr $r->{s}, 0, shift; + }, + http_content_type_byte => undef, + has_http_content_encoding => 0, + supported_image_types => {}); } - my $input_format = $http->parameter ('i'); + my $input_format = $http->get_parameter ('i'); if (defined $input_format and length $input_format) { $r->{media_type_overridden} = (not defined $r->{media_type} or $input_format ne $r->{media_type}); @@ -666,6 +563,7 @@ if ($r->{media_type} eq 'text/xml') { unless (defined $r->{charset}) { $r->{charset} = 'us-ascii'; + $r->{official_charset} = $r->{charset}; } elsif ($r->{charset_overridden} and $r->{charset} eq 'us-ascii') { $r->{charset_overridden} = 0; } @@ -677,6 +575,8 @@ return $r; } + $r->{inner_html_element} = $http->get_parameter ('e'); + return $r; } # get_input_document @@ -709,11 +609,11 @@ =head1 LICENSE -Copyright 2007 Wakaba +Copyright 2007-2008 Wakaba This library is free software; you can redistribute it and/or modify it under the same terms as Perl itself. =cut -## $Date: 2007/07/17 14:28:20 $ +## $Date: 2008/07/20 16:53:10 $
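The WebHACC::Language::* modules selected in check_and_print are not part of this diff; the following is a minimal, hypothetical Perl stub of the interface those classes appear to expose, inferred solely from the methods check_and_print calls above (new, input, output, result, onsubdoc, and the generate_*_section methods). It is a reading aid for the dispatch code, not the actual implementation.

package WebHACC::Language::Stub;    ## hypothetical class name, for illustration only
use strict;
use warnings;

sub new { return bless {}, shift }

## Accessors through which check_and_print hands over the shared objects.
sub input  { $_[0]->{input}  = $_[1] if @_ > 1; return $_[0]->{input}  }
sub output { $_[0]->{output} = $_[1] if @_ > 1; return $_[0]->{output} }
sub result { $_[0]->{result} = $_[1] if @_ > 1; return $_[0]->{result} }

## check_and_print registers a callback here; a checker that encounters nested
## documents pushes each of them to the callback so that the caller can run
## check_and_print on them recursively (the @subdoc loop above).
sub onsubdoc { $_[0]->{onsubdoc} = $_[1] if @_ > 1; return $_[0]->{onsubdoc} }

## Each generate_* method is expected to emit one section of the report through
## the WebHACC::Output object; empty stubs keep an unsupported type harmless.
sub generate_syntax_error_section    { }
sub generate_source_string_section   { }
sub generate_structure_dump_section  { }
sub generate_structure_error_section { }
sub generate_additional_sections     { }

1;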
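For orientation, a sketch of the cc-msg.*.txt message catalog that load_text_catalog reads and get_text expands. The record layout (type;class;message, one record per line) is taken from the parse pattern s/^([^;]+);([^;]*);// in the code above; the sample entries, keys, and class names below are hypothetical illustrations, not actual catalog content.

## Hypothetical cc-msg.en.txt records -- "type;class;message", one per line
## (illustrative only; the real catalog entries are not part of this diff):
##
##   m:attribute not allowed;must;The "{local-name}" attribute is not allowed here
##   m:element not allowed;must;Element "$0" is not allowed inside "$1"
##
## get_text ($type, $level, $node) prefixes $type with "$level:" when a level is
## given, then looks the key up in $Msg.  If the key is missing, it strips one
## trailing ":segment" at a time, remembering each stripped segment in @arg, so
## a call like get_text ('element not allowed:em:p', 'm', $node) falls back to
## the key "m:element not allowed" with @arg = ('em', 'p').  Within the message
## text it then expands:
##   $0, $1, ...             the collected @arg values
##   {@foo}                  $node->get_attribute_ns (undef, 'foo')
##   {@}                     $node->value (for attribute nodes)
##   {local-name}            $node->manakai_local_name
##   {element-local-name}    $node->owner_element->manakai_local_name
## and returns ($type, 'level-' . $level . ' ' . $class, $message).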