--- test/html-webhacc/cc.cgi 2007/06/27 11:08:03 1.1
+++ test/html-webhacc/cc.cgi 2008/07/21 08:39:12 1.56
@@ -1,227 +1,398 @@
#!/usr/bin/perl
use strict;
+use utf8;
use lib qw[/home/httpd/html/www/markup/html/whatpm
- /home/wakaba/work/manakai/lib
- /home/wakaba/public_html/-temp/wiki/lib];
+ /home/wakaba/work/manakai2/lib];
use CGI::Carp qw[fatalsToBrowser];
-use Time::HiRes qw/time/;
+use Scalar::Util qw[refaddr];
-use SuikaWiki::Input::HTTP; ## TODO: Use some better CGI module
+ require WebHACC::Input;
+ require WebHACC::Result;
+ require WebHACC::Output;
-my $http = SuikaWiki::Input::HTTP->new;
+my $out;
-## TODO: _charset_
-
-my @mode = split m#/#, scalar $http->meta_variable ('PATH_INFO'), -1;
-shift @mode if @mode and $mode[0] == '';
-## TODO: decode unreserved characters
+ require Message::DOM::DOMImplementation;
+ my $dom = Message::DOM::DOMImplementation->new;
+{
+ use Message::CGI::HTTP;
+ my $http = Message::CGI::HTTP->new;
- my $s = $http->parameter ('s');
- if (length $s > 1000_000) {
- print STDOUT "Status: 400 Document Too Long\nContent-Type: text/plain; charset=us-ascii\n\nToo long";
+ if ($http->get_meta_variable ('PATH_INFO') ne '/') {
+    print STDOUT "Status: 404 Not Found\nContent-Type: text/plain; charset=us-ascii\n\n404";
exit;
}
- my $char_length = length $s;
- my %time;
- my $time1;
- my $time2;
+
+ load_text_catalog ('en'); ## TODO: conneg
- require Message::DOM::DOMImplementation;
- my $dom = Message::DOM::DOMImplementation->____new;
-# $| = 1;
- my $doc;
- my $el;
-
-if (@mode == 3 and $mode[0] eq 'html' and
- ($mode[2] eq 'html' or $mode[2] eq 'test')) {
- print STDOUT "Content-Type: text/plain; charset=utf-8\n\n";
+ $out = WebHACC::Output->new;
+ $out->handle (*STDOUT);
+ $out->set_utf8;
+ $out->set_flush;
+  $out->html (qq[Content-Type: text/html; charset=utf-8
+
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<title>Web Document Conformance Checker (BETA)</title>
+<link rel="stylesheet" href="cc-style.css" type="text/css">
+<script src="cc-script.js" type="text/javascript"></script>
+</head>
+<body>
+]);
+
+ my $input = get_input_document ($http, $dom);
+
+ $out->input ($input);
+ $out->unset_flush;
+
+ my $result = WebHACC::Result->new;
+ $result->output ($out);
+ $result->{conforming_min} = 1;
+ $result->{conforming_max} = 1;
- require Encode;
- require Whatpm::HTML;
+ $out->html ('');
- $time1 = time;
- $s = Encode::decode ('utf-8', $s);
- $time2 = time;
- $time{decode} = $time2 - $time1;
+ check_and_print ($input => $result => $out);
+ $result->generate_result_section;
- print STDOUT "#errors\n";
+ $out->nav_list;
- my $onerror = sub {
- my (%opt) = @_;
- print STDOUT "$opt{line},$opt{column},$opt{type}\n";
- };
-
- $doc = $dom->create_document;
- $time1 = time;
- if (length $mode[1]) {
- $el = $doc->create_element_ns
- ('http://www.w3.org/1999/xhtml', [undef, $mode[1]]);
- Whatpm::HTML->set_inner_html ($el, $s, $onerror);
- } else {
- Whatpm::HTML->parse_string ($s => $doc, $onerror);
+ exit;
+}
+
+sub check_and_print ($$$) {
+ my ($input, $result, $out) = @_;
+ my $original_input = $out->input;
+ $out->input ($input);
+
+ $input->generate_info_section ($result);
+
+ $input->generate_transfer_sections ($result);
+
+ unless (defined $input->{s}) {
+ $result->{conforming_min} = 0;
+ return;
}
- $time2 = time;
- $time{parse} = $time2 - $time1;
- print "#document\n";
+ my $checker_class = {
+ 'text/cache-manifest' => 'WebHACC::Language::CacheManifest',
+ 'text/css' => 'WebHACC::Language::CSS',
+ 'text/html' => 'WebHACC::Language::HTML',
+ 'text/x-webidl' => 'WebHACC::Language::WebIDL',
+
+ 'text/xml' => 'WebHACC::Language::XML',
+ 'application/atom+xml' => 'WebHACC::Language::XML',
+ 'application/rss+xml' => 'WebHACC::Language::XML',
+ 'image/svg+xml' => 'WebHACC::Language::XML',
+ 'application/xhtml+xml' => 'WebHACC::Language::XML',
+ 'application/xml' => 'WebHACC::Language::XML',
+ ## TODO: Should we make all XML MIME Types fall
+ ## into this category?
+
+ ## NOTE: This type has different model from normal XML types.
+ 'application/rdf+xml' => 'WebHACC::Language::XML',
+ }->{$input->{media_type}} || 'WebHACC::Language::Default';
+
+ eval qq{ require $checker_class } or die "$0: Loading $checker_class: $@";
+ my $checker = $checker_class->new;
+ $checker->input ($input);
+ $checker->output ($out);
+ $checker->result ($result);
+
+ ## TODO: A cache manifest MUST be text/cache-manifest
+ ## TODO: WebIDL media type "text/x-webidl"
+
+ $checker->generate_syntax_error_section;
+ $checker->generate_source_string_section;
+
+ my @subdoc;
+ $checker->onsubdoc (sub {
+ push @subdoc, shift;
+ });
+
+ $checker->generate_structure_dump_section;
+ $checker->generate_structure_error_section;
+ $checker->generate_additional_sections;
+
+=pod
+
+ if (defined $doc or defined $el) {
+
+ print_listing_section ({
+ id => 'identifiers', label => 'IDs', heading => 'Identifiers',
+ }, $input, $elements->{id}) if keys %{$elements->{id}};
+ print_listing_section ({
+ id => 'terms', label => 'Terms', heading => 'Terms',
+ }, $input, $elements->{term}) if keys %{$elements->{term}};
+ print_listing_section ({
+ id => 'classes', label => 'Classes', heading => 'Classes',
+ }, $input, $elements->{class}) if keys %{$elements->{class}};
+
+ print_rdf_section ($input, $elements->{rdf}) if @{$elements->{rdf}};
+ }
- my $out;
- if ($mode[2] eq 'html') {
- $time1 = time;
- $out = Whatpm::HTML->get_inner_html ($el || $doc);
- $time2 = time;
- $time{serialize_html} = $time2 - $time1;
- } else { # test
- $time1 = time;
- $out = test_serialize ($el || $doc);
- $time2 = time;
- $time{serialize_test} = $time2 - $time1;
- }
- print STDOUT Encode::encode ('utf-8', $$out);
- print STDOUT "\n";
-} elsif (@mode == 3 and $mode[0] eq 'xhtml' and
- ($mode[2] eq 'html' or $mode[2] eq 'test')) {
- print STDOUT "Content-Type: text/plain; charset=utf-8\n\n";
-
- require Message::DOM::XMLParserTemp;
- print STDOUT "#errors\n";
-
- my $onerror = sub {
- my $err = shift;
- print STDOUT $err->location->line_number, ",";
- print STDOUT $err->location->column_number, ",";
- print STDOUT $err->text, "\n";
- return 1;
- };
-
- open my $fh, '<', \$s;
- my $time1 = time;
- $doc = Message::DOM::XMLParserTemp->parse_byte_stream
- ($fh => $dom, $onerror, charset => 'utf-8');
- my $time2 = time;
- $time{parse_xml} = $time2 - $time1;
-
- print "#document\n";
-
- my $out;
- if ($mode[2] eq 'html') {
- ## TODO: Use XHTML serializer
- #$out = Whatpm::HTML->get_inner_html ($doc);
- } else { # test
- $time1 = time;
- $out = test_serialize ($doc);
- $time2 = time;
- $time{serialize_test} = $time2 - $time1;
- }
- print STDOUT Encode::encode ('utf-8', $$out);
- print STDOUT "\n";
-} else {
- print STDOUT "Status: 404 Not Found\nContent-Type: text/plain; charset=us-ascii\n\n404";
- exit;
-}
+=cut
- if ($http->parameter ('dom5')) {
- require Whatpm::ContentChecker;
- my $onerror = sub {
- my %opt = @_;
- print STDOUT get_node_path ($opt{node}) . ';' . $opt{type} . "\n";
- };
- print STDOUT "#domerrors\n";
- $time1 = time;
- if ($el) {
- Whatpm::ContentChecker->check_element ($el, $onerror);
- } else {
- Whatpm::ContentChecker->check_document ($doc, $onerror);
+ my $id_prefix = 0;
+ for my $_subinput (@subdoc) {
+ my $subinput = WebHACC::Input::Subdocument->new (++$id_prefix);
+ $subinput->{$_} = $_subinput->{$_} for keys %$_subinput;
+ $subinput->{base_uri} = $subinput->{container_node}->base_uri
+ unless defined $subinput->{base_uri};
+ $subinput->{parent_input} = $input;
+
+ $subinput->start_section ($result);
+ check_and_print ($subinput => $result => $out);
+ $subinput->end_section ($result);
+ }
+
+ $out->input ($original_input);
+} # check_and_print
+
+
+{
+ my $Msg = {};
+
+sub load_text_catalog ($) {
+# my $self = shift;
+ my $lang = shift; # MUST be a canonical lang name
+ open my $file, '<:utf8', "cc-msg.$lang.txt"
+ or die "$0: cc-msg.$lang.txt: $!";
+ while (<$file>) {
+ if (s/^([^;]+);([^;]*);//) {
+ my ($type, $cls, $msg) = ($1, $2, $_);
+ $msg =~ tr/\x0D\x0A//d;
+ $Msg->{$type} = [$cls, $msg];
}
- $time2 = time;
- $time{check} = $time2 - $time1;
}
+} # load_text_catalog
+
+sub get_text ($;$$) {
+# my $self = shift;
+ my ($type, $level, $node) = @_;
+ $type = $level . ':' . $type if defined $level;
+ $level = 'm' unless defined $level;
+ my @arg;
+ {
+ if (defined $Msg->{$type}) {
+ my $msg = $Msg->{$type}->[1];
+ $msg =~ s{\$([0-9]+)}{
+ defined $arg[$1] ? ($arg[$1]) : '(undef)';
+ }ge; ##BUG: ^ must be escaped
+ $msg =~ s{{\@([A-Za-z0-9:_.-]+)}}{
+ UNIVERSAL::can ($node, 'get_attribute_ns')
+ ? ($node->get_attribute_ns (undef, $1)) : ''
+ }ge; ## BUG: ^ must be escaped
+ $msg =~ s{{\@}}{ ## BUG: v must be escaped
+ UNIVERSAL::can ($node, 'value') ? ($node->value) : ''
+ }ge;
+ $msg =~ s{{local-name}}{
+ UNIVERSAL::can ($node, 'manakai_local_name')
+ ? ($node->manakai_local_name) : ''
+ }ge; ## BUG: ^ must be escaped
+ $msg =~ s{{element-local-name}}{
+ (UNIVERSAL::can ($node, 'owner_element') and
+ $node->owner_element)
+ ? ($node->owner_element->manakai_local_name)
+ : '' ## BUG: ^ must be escaped
+ }ge;
+ return ($type, 'level-' . $level . ' ' . $Msg->{$type}->[0], $msg);
+ } elsif ($type =~ s/:([^:]*)$//) {
+ unshift @arg, $1;
+ redo;
+ }
+ }
+ return ($type, 'level-'.$level, ($_[0]));
+ ## BUG: ^ must be escaped
+} # get_text
+
+}
+
+sub get_input_document ($$) {
+ my ($http, $dom) = @_;
+
+ require Encode;
+ my $request_uri = Encode::decode ('utf-8', $http->get_parameter ('uri'));
+ my $r = WebHACC::Input->new;
+ if (defined $request_uri and length $request_uri) {
+ my $uri = $dom->create_uri_reference ($request_uri);
+ unless ({
+ http => 1,
+ }->{lc $uri->uri_scheme}) {
+ $r = WebHACC::Input::Error->new;
+ $r->{uri} = $request_uri;
+ $r->{request_uri} = $request_uri;
+ $r->{error_status_text} = 'URL scheme not allowed';
+ }
+
+ require Message::Util::HostPermit;
+ my $host_permit = new Message::Util::HostPermit;
+  $host_permit->add_rule (<<EOH);
+Deny host=localhost
+Deny host=*.localdomain
+Deny ipv4=0.0.0.0/8
+Deny ipv4=10.0.0.0/8
+Deny ipv4=127.0.0.0/8
+Deny ipv4=169.254.0.0/16
+Deny ipv4=172.16.0.0/12
+Deny ipv4=192.168.0.0/16
+Allow host=*
+EOH
+
+  unless ($host_permit->check ($uri->uri_host, $uri->uri_port || 80)) {
+ my $r = WebHACC::Input::Error->new;
+ $r->{uri} = $request_uri;
+ $r->{request_uri} = $request_uri;
+ $r->{error_status_text} = 'Connection to the host is forbidden';
+ return $r;
+ }
- print STDOUT "#log\n";
- for (qw/decode parse parse_xml serialize_html serialize_xml serialize_test
- check/) {
- next unless defined $time{$_};
- print STDOUT {
- decode => 'bytes->chars',
- parse => 'html5(chars)->dom5',
- parse_xml => 'xml1(chars)->dom5',
- serialize_html => 'dom5->html5(char)',
- serialize_xml => 'dom5->xml1(char)',
- serialize_test => 'dom5->test(char)',
- check => 'dom5 check',
- }->{$_};
- print STDOUT "\t", $time{$_}, "s\n";
- open my $file, '>>', ".manakai-$_.txt" or die ".manakai-$_.txt: $!";
- print $file $char_length, "\t", $time{$_}, "\n";
- }
-
-exit;
-
-sub test_serialize ($) {
- my $node = shift;
- my $r = '';
-
- my @node = map { [$_, ''] } @{$node->child_nodes};
- while (@node) {
- my $child = shift @node;
- my $nt = $child->[0]->node_type;
- if ($nt == $child->[0]->ELEMENT_NODE) {
- $r .= '| ' . $child->[1] . '<' . $child->[0]->tag_name . ">\x0A"; ## ISSUE: case?
-
- for my $attr (sort {$a->[0] cmp $b->[0]} map { [$_->name, $_->value] }
- @{$child->[0]->attributes}) {
- $r .= '| ' . $child->[1] . ' ' . $attr->[0] . '="'; ## ISSUE: case?
- $r .= $attr->[1] . '"' . "\x0A";
+ require LWP::UserAgent;
+ my $ua = WDCC::LWPUA->new;
+ $ua->{wdcc_dom} = $dom;
+ $ua->{wdcc_host_permit} = $host_permit;
+ $ua->agent ('Mozilla'); ## TODO: for now.
+ $ua->parse_head (0);
+ $ua->protocols_allowed ([qw/http/]);
+ $ua->max_size (1000_000);
+ my $req = HTTP::Request->new (GET => $request_uri);
+ $req->header ('Accept-Encoding' => 'identity, *; q=0');
+ my $res = $ua->request ($req);
+ ## TODO: 401 sets |is_success| true.
+ if ($res->is_success or $http->get_parameter ('error-page')) {
+      $r->{base_uri} = $res->base; ## NOTE: It does check |Content-Base|, |Content-Location|, and <base>. ## TODO: Use our own code!
+ $r->{uri} = $res->request->uri;
+ $r->{request_uri} = $request_uri;
+
+ ## TODO: More strict parsing...
+ my $ct = $res->header ('Content-Type');
+ if (defined $ct and $ct =~ /;\s*charset\s*=\s*"?([^\s;"]+)"?/i) {
+ $r->{charset} = lc $1;
+ $r->{charset} =~ tr/\\//d;
+ $r->{official_charset} = $r->{charset};
}
-
- unshift @node,
- map { [$_, $child->[1] . ' '] } @{$child->[0]->child_nodes};
- } elsif ($nt == $child->[0]->TEXT_NODE) {
- $r .= '| ' . $child->[1] . '"' . $child->[0]->data . '"' . "\x0A";
- } elsif ($nt == $child->[0]->CDATA_SECTION_NODE) {
-      $r .= '| ' . $child->[1] . '<![CDATA[' . $child->[0]->data . "]]>\x0A";
-    } elsif ($nt == $child->[0]->COMMENT_NODE) {
-      $r .= '| ' . $child->[1] . '<!-- ' . $child->[0]->data . " -->\x0A";
-    } elsif ($nt == $child->[0]->DOCUMENT_TYPE_NODE) {
-      $r .= '| ' . $child->[1] . '<!DOCTYPE ' . $child->[0]->name . ">\x0A";
-    } elsif ($nt == $child->[0]->PROCESSING_INSTRUCTION_NODE) {
-      $r .= '| ' . $child->[1] . '<?' . $child->[0]->target . ' ' .
- $child->[0]->data . "?>\x0A";
+
+ my $input_charset = $http->get_parameter ('charset');
+ if (defined $input_charset and length $input_charset) {
+ $r->{charset_overridden}
+ = (not defined $r->{charset} or $r->{charset} ne $input_charset);
+ $r->{charset} = $input_charset;
+ }
+
+ ## TODO: Support for HTTP Content-Encoding
+
+ $r->{s} = ''.$res->content;
+
+ require Whatpm::ContentType;
+ ($r->{official_type}, $r->{media_type})
+ = Whatpm::ContentType->get_sniffed_type
+ (get_file_head => sub {
+ return substr $r->{s}, 0, shift;
+ },
+ http_content_type_byte => $ct,
+ has_http_content_encoding =>
+ defined $res->header ('Content-Encoding'),
+ supported_image_types => {});
} else {
- $r .= '| ' . $child->[1] . $child->[0]->node_type . "\x0A"; # error
+ $r->{uri} = $res->request->uri;
+ $r->{request_uri} = $request_uri;
+ $r->{error_status_text} = $res->status_line;
}
+
+ $r->{header_field} = [];
+ $res->scan (sub {
+ push @{$r->{header_field}}, [$_[0], $_[1]];
+ });
+ $r->{header_status_code} = $res->code;
+ $r->{header_status_text} = $res->message;
+ } else {
+ $r->{s} = ''.$http->get_parameter ('s');
+    $r->{uri} = q<thismessage:/>;
+    $r->{request_uri} = q<thismessage:/>;
+    $r->{base_uri} = q<thismessage:/>;
+ $r->{charset} = ''.$http->get_parameter ('_charset_');
+ $r->{charset} =~ s/\s+//g;
+ $r->{charset} = 'utf-8' if $r->{charset} eq '';
+ $r->{official_charset} = $r->{charset};
+ $r->{header_field} = [];
+
+ require Whatpm::ContentType;
+ ($r->{official_type}, $r->{media_type})
+ = Whatpm::ContentType->get_sniffed_type
+ (get_file_head => sub {
+ return substr $r->{s}, 0, shift;
+ },
+ http_content_type_byte => undef,
+ has_http_content_encoding => 0,
+ supported_image_types => {});
}
-
- return \$r;
-} # test_serialize
-sub get_node_path ($) {
- my $node = shift;
- my @r;
- while (defined $node) {
- my $rs;
- if ($node->node_type == 1) {
- $rs = $node->manakai_local_name;
- $node = $node->parent_node;
- } elsif ($node->node_type == 2) {
- $rs = '@' . $node->manakai_local_name;
- $node = $node->owner_element;
- } elsif ($node->node_type == 3) {
- $rs = '"' . $node->data . '"';
- $node = $node->parent_node;
- } elsif ($node->node_type == 9) {
- $rs = '';
- $node = $node->parent_node;
- } else {
- $rs = '#' . $node->node_type;
- $node = $node->parent_node;
+ my $input_format = $http->get_parameter ('i');
+ if (defined $input_format and length $input_format) {
+ $r->{media_type_overridden}
+ = (not defined $r->{media_type} or $input_format ne $r->{media_type});
+ $r->{media_type} = $input_format;
+ }
+ if (defined $r->{s} and not defined $r->{media_type}) {
+ $r->{media_type} = 'text/html';
+ $r->{media_type_overridden} = 1;
+ }
+
+ if ($r->{media_type} eq 'text/xml') {
+ unless (defined $r->{charset}) {
+ $r->{charset} = 'us-ascii';
+ $r->{official_charset} = $r->{charset};
+ } elsif ($r->{charset_overridden} and $r->{charset} eq 'us-ascii') {
+ $r->{charset_overridden} = 0;
}
- unshift @r, $rs;
}
- return join '/', @r;
-} # get_node_path
+
+ if (length $r->{s} > 1000_000) {
+ $r->{error_status_text} = 'Entity-body too large';
+ delete $r->{s};
+ return $r;
+ }
+
+ $r->{inner_html_element} = $http->get_parameter ('e');
+
+ return $r;
+} # get_input_document
+
+package WDCC::LWPUA;
+BEGIN { push our @ISA, 'LWP::UserAgent'; }
+
+sub redirect_ok {
+ my $ua = shift;
+ unless ($ua->SUPER::redirect_ok (@_)) {
+ return 0;
+ }
+
+ my $uris = $_[1]->header ('Location');
+ return 0 unless $uris;
+ my $uri = $ua->{wdcc_dom}->create_uri_reference ($uris);
+ unless ({
+ http => 1,
+ }->{lc $uri->uri_scheme}) {
+ return 0;
+ }
+ unless ($ua->{wdcc_host_permit}->check ($uri->uri_host, $uri->uri_port || 80)) {
+ return 0;
+ }
+ return 1;
+} # redirect_ok
=head1 AUTHOR
@@ -229,11 +400,11 @@
=head1 LICENSE
-Copyright 2007 Wakaba
+Copyright 2007-2008 Wakaba
This library is free software; you can redistribute it
and/or modify it under the same terms as Perl itself.
=cut
-## $Date: 2007/06/27 11:08:03 $
+## $Date: 2008/07/21 08:39:12 $