--- test/html-webhacc/cc.cgi	2007/07/17 13:52:54	1.13
+++ test/html-webhacc/cc.cgi	2008/08/15 16:44:03	1.63
@@ -2,682 +2,139 @@
 use strict;
 use lib qw[/home/httpd/html/www/markup/html/whatpm
-           /home/wakaba/work/manakai/lib
-           /home/wakaba/public_html/-temp/wiki/lib];
+           /home/wakaba/work/manakai2/lib];
 use CGI::Carp qw[fatalsToBrowser];
-use Scalar::Util qw[refaddr];
-use SuikaWiki::Input::HTTP; ## TODO: Use some better CGI module
+
+require WebHACC::Input;
 
-sub htescape ($) {
-  my $s = $_[0];
-  $s =~ s/&/&amp;/g;
-  $s =~ s/</&lt;/g;
-  $s =~ s/>/&gt;/g;
-  $s =~ s/"/&quot;/g;
-  $s =~ s{([\x00-\x09\x0B-\x1F\x7F-\xA0\x{FEFF}\x{FFFC}-\x{FFFF}])}{
-    sprintf 'U+%04X', ord $1;
-  }ge;
-  return $s;
-} # htescape
-
-my $http = SuikaWiki::Input::HTTP->new;
+{
+  require Message::CGI::HTTP;
+  my $http = Message::CGI::HTTP->new;
 
-## TODO: _charset_
+  require WebHACC::Output;
+  my $out = WebHACC::Output->new;
+  $out->handle (*STDOUT);
+  $out->set_utf8;
 
-  if ($http->meta_variable ('PATH_INFO') ne '/') {
-    print STDOUT "Status: 404 Not Found\nContent-Type: text/plain; charset=us-ascii\n\n400";
+  if ($http->get_meta_variable ('PATH_INFO') ne '/') {
+    $out->http_error (404);
     exit;
   }
-  binmode STDOUT, ':utf8';
-
-  require Message::DOM::DOMImplementation;
-  my $dom = Message::DOM::DOMImplementation->new;
-
-  my $input = get_input_document ($http, $dom);
-  my $inner_html_element = $http->parameter ('e');
-
-  load_text_catalog ('en'); ## TODO: conneg
-
-  my @nav;
-  print STDOUT qq[Content-Type: text/html; charset=utf-8
-
-
-
-
-Web Document Conformance Checker (BETA)
-
-
-
-

Web Document Conformance Checker -(beta)

- -
-
-
Request URI
-
<@{[htescape $input->{request_uri}]}>
-
Document URI
-
<@{[htescape $input->{uri}]}>
-]; # no </dl> yet
-  push @nav, ['#document-info' => 'Information'];
-
-if (defined $input->{s}) {
-
-  print STDOUT qq[
-
Base URI
-
<@{[htescape $input->{base_uri}]}>
-
Internet Media Type
-
@{[htescape $input->{media_type}]} - @{[$input->{media_type_overridden} ? '(overridden)' : '']}
-
Character Encoding
-
@{[defined $input->{charset} ? ''.htescape ($input->{charset}).'' : '(none)']} - @{[$input->{charset_overridden} ? '(overridden)' : '']}
- -
-];
-
-  print_http_header_section ($input);
-
-  my $doc;
-  my $el;
-
-  if ($input->{media_type} eq 'text/html') {
-    require Encode;
-    require Whatpm::HTML;
-
-    $input->{charset} ||= 'ISO-8859-1'; ## TODO: for now.
-
-    my $t = Encode::decode ($input->{charset}, $input->{s});
-
-    print STDOUT qq[
-
-

Parse Errors

- -
-];
-  push @nav, ['#parse-errors' => 'Parse Error'];
-
-    my $onerror = sub {
-      my (%opt) = @_;
-      my ($type, $cls, $msg) = get_text ($opt{type}, $opt{level});
-      if ($opt{column} > 0) {
-        print STDOUT qq[Line $opt{line} column $opt{column}\n];
-      } else {
-        $opt{line} = $opt{line} - 1 || 1;
-        print STDOUT qq[Line $opt{line}\n];
-      }
-      $type =~ tr/ /-/;
-      $type =~ s/\|/%7C/g;
-      $msg .= qq[ [Description]];
-      print STDOUT qq[$msg\n];
-    };
-
-    $doc = $dom->create_document;
-    if (defined $inner_html_element and length $inner_html_element) {
-      $el = $doc->create_element_ns
-          ('http://www.w3.org/1999/xhtml', [undef, $inner_html_element]);
-      Whatpm::HTML->set_inner_html ($el, $t, $onerror);
-    } else {
-      Whatpm::HTML->parse_string ($t => $doc, $onerror);
-    }
-
-    print STDOUT qq[
-
-];
-
-    print_source_string_section (\($input->{s}), $input->{charset});
-  } elsif ({
-    'text/xml' => 1,
-    'application/xhtml+xml' => 1,
-    'application/xml' => 1,
-  }->{$input->{media_type}}) {
-    require Message::DOM::XMLParserTemp;
-
-    print STDOUT qq[
-
-

Parse Errors

- -
-];
-  push @nav, ['#parse-errors' => 'Parse Error'];
-
-    my $onerror = sub {
-      my $err = shift;
-      my $line = $err->location->line_number;
-      print STDOUT qq[Line $line column ];
-      print STDOUT $err->location->column_number, "";
-      print STDOUT htescape $err->text, "\n";
-      return 1;
-    };
-
-    open my $fh, '<', \($input->{s});
-    $doc = Message::DOM::XMLParserTemp->parse_byte_stream
-        ($fh => $dom, $onerror, charset => $input->{charset});
-
-    print STDOUT qq[
-
-
-];
-    print_source_string_section (\($input->{s}), $doc->input_encoding);
-  } else {
-    ## TODO: Change HTTP status code??
-    print STDOUT qq[
-
-

Media type @{[htescape $input->{media_type}]} is not supported!

-
-]; - push @nav, ['#result-summary' => 'Result']; - } - - - if (defined $doc or defined $el) { - print STDOUT qq[ -
-

Document Tree

-];
-  push @nav, ['#document-tree' => 'Tree'];
-
-  print_document_tree ($el || $doc);
-
-  print STDOUT qq[
-
-
-
-
-Document Errors
-
-
-];
-  push @nav, ['#document-errors' => 'Document Error'];
-
-  require Whatpm::ContentChecker;
-  my $onerror = sub {
-    my %opt = @_;
-    my ($type, $cls, $msg) = get_text ($opt{type}, $opt{level});
-    $type =~ tr/ /-/;
-    $type =~ s/\|/%7C/g;
-    $msg .= qq[ [Description]];
-    print STDOUT qq[] . get_node_link ($opt{node}) .
-        qq[\n], $msg, "\n";
-  };
-
-  my $elements;
-  if ($el) {
-    $elements = Whatpm::ContentChecker->check_element ($el, $onerror);
-  } else {
-    $elements = Whatpm::ContentChecker->check_document ($doc, $onerror);
-  }
-
-  print STDOUT qq[
-
-];
-
-  if (@{$elements->{table}}) {
-    require JSON;
-
-    print STDOUT qq[
-
-

Tables

- - - - -]; - - my $i = 0; - for my $table_el (@{$elements->{table}}) { - $i++; - print STDOUT qq[

] . - get_node_link ($table_el) . q[

]; - - ## TODO: Make |ContentChecker| return |form_table| result - ## so that this script don't have to run the algorithm twice. - my $table = Whatpm::HTMLTable->form_table ($table_el); - - for (@{$table->{column_group}}, @{$table->{column}}, $table->{caption}) { - next unless $_; - delete $_->{element}; - } - - for (@{$table->{row_group}}) { - next unless $_; - next unless $_->{element}; - $_->{type} = $_->{element}->manakai_local_name; - delete $_->{element}; - } - - for (@{$table->{cell}}) { - next unless $_; - for (@{$_}) { - next unless $_; - for (@$_) { - $_->{id} = refaddr $_->{element} if defined $_->{element}; - delete $_->{element}; - $_->{is_header} = $_->{is_header} ? 1 : 0; - } - } - } - - print STDOUT '
]; - } - - print STDOUT qq[
]; - } - - if (keys %{$elements->{id}}) { - print STDOUT qq[ -
-

Identifiers

- -
-]; - for my $id (sort {$a cmp $b} keys %{$elements->{id}}) { - print STDOUT qq[
@{[htescape $id]}
]; - for (@{$elements->{id}->{$id}}) { - print STDOUT qq[
].get_node_link ($_).qq[
]; - } - } - print STDOUT qq[
]; - } - - if (keys %{$elements->{term}}) { - print STDOUT qq[ -
-

Terms

- -
-]; - for my $term (sort {$a cmp $b} keys %{$elements->{term}}) { - print STDOUT qq[
@{[htescape $term]}
]; - for (@{$elements->{term}->{$term}}) { - print STDOUT qq[
].get_node_link ($_).qq[
]; - } - } - print STDOUT qq[
]; - } - } - - ## TODO: Show result -} else { - print STDOUT qq[ - - - -
-

Input Error: @{[htescape ($input->{error_status_text})]}

-
-];
-  push @nav, ['#result-summary' => 'Result'];
-
-}
-
-  print STDOUT qq[
-
-
-
-];
-
-exit;
-
-sub print_http_header_section ($) {
-  my $input = shift;
-  return unless defined $input->{header_status_code} or
-      defined $input->{header_status_text} or
-      @{$input->{header_field}};
+  $out->load_text_catalog ($primary_language);
-  push @nav, ['#source-header' => 'HTTP Header'];
-  print STDOUT qq[
-

HTTP Header

- -

Note: Due to the limitation of the -network library in use, the content of this section might -not be the real header.

- - -]; - - if (defined $input->{header_status_code}) { - print STDOUT qq[]; - print STDOUT qq[]; - } - if (defined $input->{header_status_text}) { - print STDOUT qq[]; - print STDOUT qq[]; - } - - for (@{$input->{header_field}}) { - print STDOUT qq[]; - print STDOUT qq[]; - } - - print STDOUT qq[
Status code@{[htescape ($input->{header_status_code})]}
Status text@{[htescape ($input->{header_status_text})]}
@{[htescape ($_->[0])]}@{[htescape ($_->[1])]}
-];
-} # print_http_header_section
-
-sub print_source_string_section ($$) {
-  require Encode;
-  my $enc = Encode::find_encoding ($_[1]); ## TODO: charset name -> Perl name
-  return unless $enc;
-
-  my $s = \($enc->decode (${$_[0]}));
-  my $i = 1;
-  push @nav, ['#source-string' => 'Source'];
-  print STDOUT qq[
-

Document Source

-
    \n]; - if (length $$s) { - while ($$s =~ /\G([^\x0A]*?)\x0D?\x0A/gc) { - print STDOUT qq[
  1. ], htescape $1, "
  2. \n"; - $i++; - } - if ($$s =~ /\G([^\x0A]+)/gc) { - print STDOUT qq[
  3. ], htescape $1, "
  4. \n"; - } - } else { - print STDOUT q[
  5. ]; - } - print STDOUT "
"; -} # print_input_string_section - -sub print_document_tree ($) { - my $node = shift; - my $r = '
    '; - - my @node = ($node); - while (@node) { - my $child = shift @node; - unless (ref $child) { - $r .= $child; - next; - } - - my $node_id = 'node-'.refaddr $child; - my $nt = $child->node_type; - if ($nt == $child->ELEMENT_NODE) { - my $child_nsuri = $child->namespace_uri; - $r .= qq[
  1. ] . htescape ($child->tag_name) . - ''; ## ISSUE: case - - if ($child->has_attributes) { - $r .= ''; - } - - if ($child->has_child_nodes) { - $r .= '
      '; - unshift @node, @{$child->child_nodes}, '
  2. '; - } else { - $r .= ''; - } - } elsif ($nt == $child->TEXT_NODE) { - $r .= qq'
  3. ' . htescape ($child->data) . '
  4. '; - } elsif ($nt == $child->CDATA_SECTION_NODE) { - $r .= qq'
  5. <[CDATA[' . htescape ($child->data) . ']]>
  6. '; - } elsif ($nt == $child->COMMENT_NODE) { - $r .= qq'
  7. <!--' . htescape ($child->data) . '-->
  8. '; - } elsif ($nt == $child->DOCUMENT_NODE) { - $r .= qq'
  9. Document'; - $r .= qq[]; - if ($child->has_child_nodes) { - $r .= '
      '; - unshift @node, @{$child->child_nodes}, '
  10. '; - } - } elsif ($nt == $child->DOCUMENT_TYPE_NODE) { - $r .= qq'
  11. <!DOCTYPE>
  12. '; - } elsif ($nt == $child->PROCESSING_INSTRUCTION_NODE) { - $r .= qq'
  13. <?@{[htescape ($child->target)]} @{[htescape ($child->data)]}?>
  14. '; - } else { - $r .= qq'
  15. @{[$child->node_type]} @{[htescape ($child->node_name)]}
  16. '; # error - } - } - - $r .= '
';
-  print STDOUT $r;
-} # print_document_tree
-
-sub get_node_path ($) {
-  my $node = shift;
-  my @r;
-  while (defined $node) {
-    my $rs;
-    if ($node->node_type == 1) {
-      $rs = $node->manakai_local_name;
-      $node = $node->parent_node;
-    } elsif ($node->node_type == 2) {
-      $rs = '@' . $node->manakai_local_name;
-      $node = $node->owner_element;
-    } elsif ($node->node_type == 3) {
-      $rs = '"' . $node->data . '"';
-      $node = $node->parent_node;
-    } elsif ($node->node_type == 9) {
-      @r = ('') unless @r;
-      $rs = '';
-      $node = $node->parent_node;
-    } else {
-      $rs = '#' . $node->node_type;
-      $node = $node->parent_node;
-    }
-    unshift @r, $rs;
+  $out->set_flush;
+  $out->http_header;
+  $out->html_header;
+  $out->unset_flush;
+
+  $out->generate_input_section ($http);
+
+  my $u = $http->get_parameter ('uri');
+  my $s = $http->get_parameter ('s');
+  if ((not defined $u or not length $u) and
+      (not defined $s or not length $s)) {
+    exit;
   }
-  return join '/', @r;
-} # get_node_path
-
-sub get_node_link ($) {
-  return qq[] .
-      htescape (get_node_path ($_[0])) . qq[];
-} # get_node_link
+  require WebHACC::Result;
+  my $result = WebHACC::Result->new;
+  $result->output ($out);
-{
-  my $Msg = {};
+  require WebHACC::Input;
+  my $input = WebHACC::Input->get_document ($http => $result => $out);
-sub load_text_catalog ($) {
-  my $lang = shift; # MUST be a canonical lang name
-  open my $file, '<', "cc-msg.$lang.txt" or die "$0: cc-msg.$lang.txt: $!";
-  while (<$file>) {
-    if (s/^([^;]+);([^;]*);//) {
-      my ($type, $cls, $msg) = ($1, $2, $_);
-      $msg =~ tr/\x0D\x0A//d;
-      $Msg->{$type} = [$cls, $msg];
-    }
-  }
-} # load_text_catalog
-
-sub get_text ($) {
-  my ($type, $level) = @_;
-  $type = $level . ':' . $type if defined $level;
-  my @arg;
-  {
-    if (defined $Msg->{$type}) {
-      my $msg = $Msg->{$type}->[1];
-      $msg =~ s{\$([0-9]+)}{
-        defined $arg[$1] ? htescape ($arg[$1]) : '(undef)';
-      }ge;
-      return ($type, $Msg->{$type}->[0], $msg);
-    } elsif ($type =~ s/:([^:]*)$//) {
-      unshift @arg, $1;
-      redo;
-    }
-  }
-  return ($type, '', htescape ($_[0]));
-} # get_text
+  check_and_print ($input => $result => $out);
+
+  $out->nav_list;
+  exit;
 }
-
-sub get_input_document ($$) {
-  my ($http, $dom) = @_;
-
-  my $request_uri = $http->parameter ('uri');
-  my $r = {};
-  if (defined $request_uri and length $request_uri) {
-    my $uri = $dom->create_uri_reference ($request_uri);
-    unless ({
-      http => 1,
-    }->{lc $uri->uri_scheme}) {
-      return {uri => $request_uri, request_uri => $request_uri,
-              error_status_text => 'URI scheme not allowed'};
-    }
-
-    require Message::Util::HostPermit;
-    my $host_permit = new Message::Util::HostPermit;
-    $host_permit->add_rule (<check ($uri->uri_host, $uri->uri_port || 80)) {
-      return {uri => $request_uri, request_uri => $request_uri,
-              error_status_text => 'Connection to the host is forbidden'};
-    }
-
-    require LWP::UserAgent;
-    my $ua = WDCC::LWPUA->new;
-    $ua->{wdcc_dom} = $dom;
-    $ua->{wdcc_host_permit} = $host_permit;
-    $ua->agent ('Mozilla'); ## TODO: for now.
-    $ua->parse_head (0);
-    $ua->protocols_allowed ([qw/http/]);
-    $ua->max_size (1000_000);
-    my $req = HTTP::Request->new (GET => $request_uri);
-    my $res = $ua->request ($req);
-    if ($res->is_success or $http->parameter ('error-page')) {
-      $r->{base_uri} = $res->base; ## NOTE: It does check |Content-Base|, |Content-Location|, and <base>. ## TODO: Use our own code!
-      $r->{uri} = $res->request->uri;
-      $r->{request_uri} = $request_uri;
-
-      ## TODO: More strict parsing...
-      my $ct = $res->header ('Content-Type');
-      if (defined $ct and $ct =~ m#^([0-9A-Za-z._+-]+/[0-9A-Za-z._+-]+)#) {
-        $r->{media_type} = lc $1;
-      }
-      if (defined $ct and $ct =~ /;\s*charset\s*=\s*"?(\S+)"?/i) {
-        $r->{charset} = lc $1;
-        $r->{charset} =~ tr/\\//d;
-      }
-
-      my $input_charset = $http->parameter ('charset');
-      if (defined $input_charset and length $input_charset) {
-        $r->{charset_overridden}
-            = (not defined $r->{charset} or $r->{charset} ne $input_charset);
-        $r->{charset} = $input_charset;
-      }
-
-      $r->{s} = ''.$res->content;
-    } else {
-      $r->{uri} = $res->request->uri;
-      $r->{request_uri} = $request_uri;
-      $r->{error_status_text} = $res->status_line;
-    }
-
-    $r->{header_field} = [];
-    $res->scan (sub {
-      push @{$r->{header_field}}, [$_[0], $_[1]];
-    });
-    $r->{header_status_code} = $res->code;
-    $r->{header_status_text} = $res->message;
-  } else {
-    $r->{s} = ''.$http->parameter ('s');
-    $r->{uri} = q;
-    $r->{request_uri} = q;
-    $r->{base_uri} = q;
-    $r->{charset} = ''.$http->parameter ('_charset_');
-    $r->{charset} =~ s/\s+//g;
-    $r->{charset} = 'utf-8' if $r->{charset} eq '';
-    $r->{header_field} = [];
-  }
-
-  my $input_format = $http->parameter ('i');
-  if (defined $input_format and length $input_format) {
-    $r->{media_type_overridden}
-        = (not defined $r->{media_type} or $input_format ne $r->{media_type});
-    $r->{media_type} = $input_format;
-  }
-  if (defined $r->{s} and not defined $r->{media_type}) {
-    $r->{media_type} = 'text/html';
-    $r->{media_type_overridden} = 1;
-  }
-
-  if ($r->{media_type} eq 'text/xml') {
-    unless (defined $r->{charset}) {
-      $r->{charset} = 'us-ascii';
-    } elsif ($r->{charset_overridden} and $r->{charset} eq 'us-ascii') {
-      $r->{charset_overridden} = 0;
-    }
-  }
-
-  if (length $r->{s} > 1000_000) {
-    $r->{error_status_text} = 'Entity-body too large';
-    delete $r->{s};
-    return $r;
+sub check_and_print ($$$) {
+  my ($input, $result, $out) = @_;
+  my $original_input = $out->input;
+  $out->input ($input);
+
+  $input->generate_info_section ($result);
+
+  $input->generate_transfer_sections ($result);
+
+  unless (defined $input->{s}) {
+    ## NOTE: This is an error of the implementation.
+    $result->layer_uncertain ('transfer');
+    $result->generate_result_section;
+
+    $out->input ($original_input);
+    return;
+  }
+
+  my $checker_class = {
+    'text/cache-manifest' => 'WebHACC::Language::CacheManifest',
+    'text/css' => 'WebHACC::Language::CSS',
+    'text/html' => 'WebHACC::Language::HTML',
+    'text/x-webidl' => 'WebHACC::Language::WebIDL',
+
+    'text/xml' => 'WebHACC::Language::XML',
+    'application/atom+xml' => 'WebHACC::Language::XML',
+    'application/rss+xml' => 'WebHACC::Language::XML',
+    'image/svg+xml' => 'WebHACC::Language::XML',
+    'application/xhtml+xml' => 'WebHACC::Language::XML',
+    'application/xml' => 'WebHACC::Language::XML',
+    ## TODO: Should we make all XML MIME Types fall
+    ## into this category?
+
+    ## NOTE: This type has different model from normal XML types.
+    'application/rdf+xml' => 'WebHACC::Language::XML',
+  }->{$input->{media_type}} || 'WebHACC::Language::Default';
+
+  eval qq{ require $checker_class } or die "$0: Loading $checker_class: $@";
+  my $checker = $checker_class->new;
+  $checker->input ($input);
+  $checker->output ($out);
+  $checker->result ($result);
+
+  ## TODO: A cache manifest MUST be text/cache-manifest
+  ## TODO: WebIDL media type "text/x-webidl"
+
+  $checker->generate_syntax_error_section;
+  $checker->generate_source_string_section;
+
+  my @subdoc;
+  $checker->onsubdoc (sub {
+    push @subdoc, shift;
+  });
+
+  $checker->generate_structure_dump_section;
+  $checker->generate_structure_error_section;
+  $checker->generate_additional_sections;
+
+  my $id_prefix = 0;
+  for my $_subinput (@subdoc) {
+    my $subinput = WebHACC::Input::Subdocument->new (++$id_prefix);
+    $subinput->{$_} = $_subinput->{$_} for keys %$_subinput;
+    $subinput->{base_uri} = $subinput->{container_node}->base_uri
+        unless defined $subinput->{base_uri};
+    $subinput->{parent_input} = $input;
+
+    my $subresult = WebHACC::Result->new;
+    $subresult->output ($out);
+    $subresult->parent_result ($result);
+
+    $subinput->start_section ($subresult);
+    check_and_print ($subinput => $subresult => $out);
+    $subinput->end_section ($subresult);
   }
-  return $r;
-} # get_input_document
+  $result->generate_result_section;
 
-package WDCC::LWPUA;
-BEGIN { push our @ISA, 'LWP::UserAgent'; }
-
-sub redirect_ok {
-  my $ua = shift;
-  unless ($ua->SUPER::redirect_ok (@_)) {
-    return 0;
-  }
-
-  my $uris = $_[1]->header ('Location');
-  return 0 unless $uris;
-  my $uri = $ua->{wdcc_dom}->create_uri_reference ($uris);
-  unless ({
-    http => 1,
-  }->{lc $uri->uri_scheme}) {
-    return 0;
-  }
-  unless ($ua->{wdcc_host_permit}->check ($uri->uri_host, $uri->uri_port || 80)) {
-    return 0;
-  }
-  return 1;
-} # redirect_ok
+  $out->input ($original_input);
+} # check_and_print
 
 =head1 AUTHOR
 
@@ -685,11 +142,11 @@
 
 =head1 LICENSE
 
-Copyright 2007 Wakaba
+Copyright 2007-2008 Wakaba
 
 This library is free software; you can redistribute
 it and/or modify it under the same terms as Perl itself.
 
 =cut
 
-## $Date: 2007/07/17 13:52:54 $
+## $Date: 2008/08/15 16:44:03 $
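The new check_and_print added in this revision combines two patterns that are easy to miss in the diff: a media-type-to-checker-class dispatch table with a 'WebHACC::Language::Default' fallback, and recursion into subdocuments that a checker reports through its onsubdoc callback. The standalone sketch below reproduces only that control flow; the My::Checker::* packages, the sample URI, and the toy "<style" trigger are invented for illustration and are not part of WebHACC.

#!/usr/bin/perl
# Minimal sketch of the dispatch-and-recurse pattern used by check_and_print.
# Package names and inputs are hypothetical; only the control flow mirrors
# the real code.
use strict;
use warnings;

package My::Checker::Default;
sub new { return bless {}, shift }
sub check {
  my ($self, $input, $onsubdoc) = @_;
  print "checking <$input->{uri}> as $input->{media_type}\n";
}

package My::Checker::XML;
our @ISA = ('My::Checker::Default');
sub check {
  my ($self, $input, $onsubdoc) = @_;
  $self->SUPER::check ($input, $onsubdoc);
  ## A real checker would report nested resources (e.g. embedded style
  ## sheets) through the callback; this toy version fakes one.
  $onsubdoc->({uri => "$input->{uri}#style",
               media_type => 'text/css',
               s => ''}) if $input->{s} =~ /<style/;
}

package My::Checker::CSS;
our @ISA = ('My::Checker::Default');

package main;

## Media type => checker class, with a catch-all default, as in the diff.
my $class_for = {
  'text/css' => 'My::Checker::CSS',
  'text/xml' => 'My::Checker::XML',
  'application/xhtml+xml' => 'My::Checker::XML',
};

sub check_and_print {
  my $input = shift;
  my $class = $class_for->{$input->{media_type}} || 'My::Checker::Default';

  ## The real code loads the class on demand with a string eval
  ## ("eval qq{ require $checker_class }"); these example packages are
  ## defined inline, so no loading is needed here.
  my $checker = $class->new;

  ## Collect subdocuments first, then recurse, as the diff does.
  my @subdoc;
  $checker->check ($input, sub { push @subdoc, shift });
  check_and_print ($_) for @subdoc;
}

check_and_print ({uri => 'http://example.com/a',
                  media_type => 'application/xhtml+xml',
                  s => '<style>p{}</style>'});

As in the real code, subdocuments are gathered into an array and only recursed into after the parent checker has finished its own sections, which keeps each document's report contiguous in the output.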