X-Git-Url: http://git.hungrycats.org/cgi-bin/gitweb.cgi?p=xscreensaver;a=blobdiff_plain;f=hacks%2Fwebcollage;h=2daa13b8d17cdbc05c6b94703a4333d7e78a13c7;hp=4ec6147148cc29e793817ea98c75b5b0f0831467;hb=019de959b265701cd0c3fccbb61f2b69f06bf9ee;hpb=5f9c47ca98dd43d8f59b7c27d3fde6edfde4fe21 diff --git a/hacks/webcollage b/hacks/webcollage index 4ec61471..2daa13b8 100755 --- a/hacks/webcollage +++ b/hacks/webcollage @@ -1,6 +1,6 @@ #!/usr/bin/perl -w # -# webcollage, Copyright (c) 1999-2011 by Jamie Zawinski +# webcollage, Copyright © 1999-2013 by Jamie Zawinski # This program decorates the screen with random images from the web. # One satisfied customer described it as "a nonstop pop culture brainbath." # @@ -48,36 +48,46 @@ use strict; #use diagnostics; -use Socket; require Time::Local; require POSIX; use Fcntl ':flock'; # import LOCK_* constants use POSIX qw(strftime); - -use bytes; # Larry can take Unicode and shove it up his ass sideways. - # Perl 5.8.0 causes us to start getting incomprehensible - # errors about UTF-8 all over the place without this. +use LWP::UserAgent; +use bytes; my $progname = $0; $progname =~ s@.*/@@g; -my $version = q{ $Revision: 1.152 $ }; $version =~ s/^[^0-9]+([0-9.]+).*$/$1/; -my $copyright = "WebCollage $version, Copyright (c) 1999-2011" . +my $version = q{ $Revision: 1.162 $ }; $version =~ s/^[^0-9]+([0-9.]+).*$/$1/; +my $copyright = "WebCollage $version, Copyright (c) 1999-2013" . " Jamie Zawinski \n" . " http://www.jwz.org/webcollage/\n"; -my @search_methods = ( 20, "googlephotos", \&pick_from_google_image_photos, - 10, "googleimgs", \&pick_from_google_images, - 10, "googlenums", \&pick_from_google_image_numbers, +my @search_methods = ( + # Google is rate-limiting us now, so this works ok from + # a short-running screen saver, but not as a batch job. + # I haven't found a workaround. + # + 7, "googlephotos", \&pick_from_google_image_photos, + 5, "googleimgs", \&pick_from_google_images, + 5, "googlenums", \&pick_from_google_image_numbers, + + # So let's try Bing instead. No rate limiting yet! + # + 7, "bingphotos", \&pick_from_bing_image_photos, + 6, "bingimgs", \&pick_from_bing_images, + 6, "bingnums", \&pick_from_bing_image_numbers, - 19, "altavista", \&pick_from_alta_vista_random_link, - 12, "flickr_recent", \&pick_from_flickr_recent, - 10, "flickr_random", \&pick_from_flickr_random, - 10, "livejournal", \&pick_from_livejournal_images, - 5, "twitter", \&pick_from_twitter_images, + 19, "flickr_recent", \&pick_from_flickr_recent, + 15, "flickr_random", \&pick_from_flickr_random, + 20, "instagram", \&pick_from_instagram, + 6, "livejournal", \&pick_from_livejournal_images, 4, "yahoorand", \&pick_from_yahoo_random_link, + # Twitter destroyed their whole API in 2013. + # 0, "twitpic", \&pick_from_twitpic_images, + # 0, "twitter", \&pick_from_twitter_images, # This is a cute way to search for a certain webcams. # Not included in default methods, since these images @@ -86,6 +96,9 @@ my @search_methods = ( 20, "googlephotos", \&pick_from_google_image_photos, # 0, "securitycam", \&pick_from_security_camera, + # Nonfunctional as of June 2011. + # 0, "altavista", \&pick_from_alta_vista_random_link, + # In Apr 2002, Google asked me to stop searching them. # I asked them to add a "random link" url. They said # "that would be easy, we'll think about it" and then @@ -97,31 +110,31 @@ my @search_methods = ( 20, "googlephotos", \&pick_from_google_image_photos, # it's no longer possible to do "or" searches on news # images, so we rarely get any hits there any more. 
# - # 0, "yahoonews", \&pick_from_yahoo_news_text, + # 0, "yahoonews", \&pick_from_yahoo_news_text, # Dec 2004: the ircimages guy's server can't take the # heat, so he started banning the webcollage user agent. # I tried to convince him to add a lighter-weight page to # support webcollage better, but he doesn't care. # - # 0, "ircimages", \&pick_from_ircimages, + # 0, "ircimages", \&pick_from_ircimages, # Dec 2002: Alta Vista has a new "random link" URL now. # They added it specifically to better support webcollage! # That was super cool of them. This is how we used to do # it, before: # - # 0, "avimages", \&pick_from_alta_vista_images, - # 0, "avtext", \&pick_from_alta_vista_text, + # 0, "avimages", \&pick_from_alta_vista_images, + # 0, "avtext", \&pick_from_alta_vista_text, # This broke in 2004. Eh, Lycos sucks anyway. # - # 0, "lycos", \&pick_from_lycos_text, + # 0, "lycos", \&pick_from_lycos_text, # This broke in 2003, I think. I suspect Hotbot is # actually the same search engine data as Lycos. # - # 0, "hotbot", \&pick_from_hotbot_text, + # 0, "hotbot", \&pick_from_hotbot_text, ); # programs we can use to write to the root window (tried in ascending order.) @@ -206,20 +219,29 @@ my %poisoners = ( # site" diagnostic message. # my %warningless_sites = ( - "home.earthlink.net" => 1, # Lots of home pages here. - "www.geocities.com" => 1, + "home.earthlink.net" => 1, "www.angelfire.com" => 1, "members.aol.com" => 1, "img.photobucket.com" => 1, "pics.livejournal.com" => 1, "tinypic.com" => 1, "flickr.com" => 1, + "staticflickr.com" => 1, "pbase.com" => 1, "blogger.com" => 1, "multiply.com" => 1, "wikimedia.org" => 1, "twitpic.com" => 1, - "amazonaws.com" => 1, # used by twitpic.com + "amazonaws.com" => 1, + "blogspot.com" => 1, + "photoshelter.com" => 1, + "myspacecdn.com" => 1, + "feedburner.com" => 1, + "wikia.com" => 1, + "ljplus.ru" => 1, + "yandex.ru" => 1, + "imgur.com" => 1, + "yfrog.com" => 1, "yimg.com" => 1, # This is where dailynews.yahoo.com stores "eimg.com" => 1, # its images, so pick_from_yahoo_news_text() @@ -238,44 +260,53 @@ my %warningless_sites = ( # my %entity_table = ( "apos" => '\'', - "quot" => '"', "amp" => '&', "lt" => '<', "gt" => '>', - "nbsp" => ' ', "iexcl" => '¡', "cent" => '¢', "pound" => '£', - "curren" => '¤', "yen" => '¥', "brvbar" => '¦', "sect" => '§', - "uml" => '¨', "copy" => '©', "ordf" => 'ª', "laquo" => '«', - "not" => '¬', "shy" => '­', "reg" => '®', "macr" => '¯', - "deg" => '°', "plusmn" => '±', "sup2" => '²', "sup3" => '³', - "acute" => '´', "micro" => 'µ', "para" => '¶', "middot" => '·', - "cedil" => '¸', "sup1" => '¹', "ordm" => 'º', "raquo" => '»', - "frac14" => '¼', "frac12" => '½', "frac34" => '¾', "iquest" => '¿', - "Agrave" => 'À', "Aacute" => 'Á', "Acirc" => 'Â', "Atilde" => 'Ã', - "Auml" => 'Ä', "Aring" => 'Å', "AElig" => 'Æ', "Ccedil" => 'Ç', - "Egrave" => 'È', "Eacute" => 'É', "Ecirc" => 'Ê', "Euml" => 'Ë', - "Igrave" => 'Ì', "Iacute" => 'Í', "Icirc" => 'Î', "Iuml" => 'Ï', - "ETH" => 'Ð', "Ntilde" => 'Ñ', "Ograve" => 'Ò', "Oacute" => 'Ó', - "Ocirc" => 'Ô', "Otilde" => 'Õ', "Ouml" => 'Ö', "times" => '×', - "Oslash" => 'Ø', "Ugrave" => 'Ù', "Uacute" => 'Ú', "Ucirc" => 'Û', - "Uuml" => 'Ü', "Yacute" => 'Ý', "THORN" => 'Þ', "szlig" => 'ß', - "agrave" => 'à', "aacute" => 'á', "acirc" => 'â', "atilde" => 'ã', - "auml" => 'ä', "aring" => 'å', "aelig" => 'æ', "ccedil" => 'ç', - "egrave" => 'è', "eacute" => 'é', "ecirc" => 'ê', "euml" => 'ë', - "igrave" => 'ì', "iacute" => 'í', "icirc" => 'î', "iuml" => 'ï', - "eth" => 'ð', "ntilde" => 'ñ', 
"ograve" => 'ò', "oacute" => 'ó', - "ocirc" => 'ô', "otilde" => 'õ', "ouml" => 'ö', "divide" => '÷', - "oslash" => 'ø', "ugrave" => 'ù', "uacute" => 'ú', "ucirc" => 'û', - "uuml" => 'ü', "yacute" => 'ý', "thorn" => 'þ', "yuml" => 'ÿ', + "quot" => '"', "amp" => '&', "lt" => '<', + "gt" => '>', "nbsp" => ' ', "iexcl" => '', + "cent" => "\xA2", "pound" => "\xA3", "curren" => "\xA4", + "yen" => "\xA5", "brvbar" => "\xA6", "sect" => "\xA7", + "uml" => "\xA8", "copy" => "\xA9", "ordf" => "\xAA", + "laquo" => "\xAB", "not" => "\xAC", "shy" => "\xAD", + "reg" => "\xAE", "macr" => "\xAF", "deg" => "\xB0", + "plusmn" => "\xB1", "sup2" => "\xB2", "sup3" => "\xB3", + "acute" => "\xB4", "micro" => "\xB5", "para" => "\xB6", + "middot" => "\xB7", "cedil" => "\xB8", "sup1" => "\xB9", + "ordm" => "\xBA", "raquo" => "\xBB", "frac14" => "\xBC", + "frac12" => "\xBD", "frac34" => "\xBE", "iquest" => "\xBF", + "Agrave" => "\xC0", "Aacute" => "\xC1", "Acirc" => "\xC2", + "Atilde" => "\xC3", "Auml" => "\xC4", "Aring" => "\xC5", + "AElig" => "\xC6", "Ccedil" => "\xC7", "Egrave" => "\xC8", + "Eacute" => "\xC9", "Ecirc" => "\xCA", "Euml" => "\xCB", + "Igrave" => "\xCC", "Iacute" => "\xCD", "Icirc" => "\xCE", + "Iuml" => "\xCF", "ETH" => "\xD0", "Ntilde" => "\xD1", + "Ograve" => "\xD2", "Oacute" => "\xD3", "Ocirc" => "\xD4", + "Otilde" => "\xD5", "Ouml" => "\xD6", "times" => "\xD7", + "Oslash" => "\xD8", "Ugrave" => "\xD9", "Uacute" => "\xDA", + "Ucirc" => "\xDB", "Uuml" => "\xDC", "Yacute" => "\xDD", + "THORN" => "\xDE", "szlig" => "\xDF", "agrave" => "\xE0", + "aacute" => "\xE1", "acirc" => "\xE2", "atilde" => "\xE3", + "auml" => "\xE4", "aring" => "\xE5", "aelig" => "\xE6", + "ccedil" => "\xE7", "egrave" => "\xE8", "eacute" => "\xE9", + "ecirc" => "\xEA", "euml" => "\xEB", "igrave" => "\xEC", + "iacute" => "\xED", "icirc" => "\xEE", "iuml" => "\xEF", + "eth" => "\xF0", "ntilde" => "\xF1", "ograve" => "\xF2", + "oacute" => "\xF3", "ocirc" => "\xF4", "otilde" => "\xF5", + "ouml" => "\xF6", "divide" => "\xF7", "oslash" => "\xF8", + "ugrave" => "\xF9", "uacute" => "\xFA", "ucirc" => "\xFB", + "uuml" => "\xFC", "yacute" => "\xFD", "thorn" => "\xFE", + "yuml" => "\xFF", # HTML 4 entities that do not have 1:1 Latin1 mappings. 
- "bull" => "*", "hellip"=> "...", "prime" => "'", "Prime" => "\"", - "frasl" => "/", "trade" => "[tm]", "larr" => "<-", "rarr" => "->", - "harr" => "<->", "lArr" => "<=", "rArr" => "=>", "hArr" => "<=>", - "empty" => "Ø", "minus" => "-", "lowast"=> "*", "sim" => "~", - "cong" => "=~", "asymp" => "~", "ne" => "!=", "equiv" => "==", - "le" => "<=", "ge" => ">=", "lang" => "<", "rang" => ">", - "loz" => "<>", "OElig" => "OE", "oelig" => "oe", "Yuml" => "Y", - "circ" => "^", "tilde" => "~", "ensp" => " ", "emsp" => " ", - "thinsp"=> " ", "ndash" => "-", "mdash" => "--", "lsquo" => "`", - "rsquo" => "'", "sbquo" => "'", "ldquo" => "\"", "rdquo" => "\"", - "bdquo" => "\"", "lsaquo"=> "<", "rsaquo"=> ">", + "bull" => "*", "hellip"=> "...", "prime" => "'", "Prime" => "\"", + "frasl" => "/", "trade" => "[tm]", "larr" => "<-", "rarr" => "->", + "harr" => "<->", "lArr" => "<=", "rArr" => "=>", "hArr" => "<=>", + "empty" => "\xD8", "minus" => "-", "lowast"=> "*", "sim" => "~", + "cong" => "=~", "asymp" => "~", "ne" => "!=", "equiv" => "==", + "le" => "<=", "ge" => ">=", "lang" => "<", "rang" => ">", + "loz" => "<>", "OElig" => "OE", "oelig" => "oe", "Yuml" => "Y", + "circ" => "^", "tilde" => "~", "ensp" => " ", "emsp" => " ", + "thinsp"=> " ", "ndash" => "-", "mdash" => "--", "lsquo" => "`", + "rsquo" => "'", "sbquo" => "'", "ldquo" => "\"", "rdquo" => "\"", + "bdquo" => "\"", "lsaquo"=> "<", "rsaquo"=> ">", ); @@ -353,170 +384,62 @@ sub get_document_1($$$) { if (!defined($timeout)) { $timeout = $http_timeout; } if ($timeout > $http_timeout) { $timeout = $http_timeout; } - if ($timeout <= 0) { - LOG (($verbose_net || $verbose_load), "timed out for $url"); - return (); - } - - LOG ($verbose_net, "get_document_1 $url " . ($referer ? $referer : "")); - - if (! ($url =~ m@^http://@i)) { - LOG ($verbose_net, "not an HTTP URL: $url"); - return (); - } + my $user_agent = "$progname/$version"; - my ($url_proto, $dummy, $serverstring, $path) = split(/\//, $url, 4); - $path = "" unless $path; + if ($url =~ m@^http://www\.altavista\.com/@ || + $url =~ m@^http://random\.yahoo\.com/@ || + $url =~ m@^http://images\.google\.com/@ || + $url =~ m@^http://www\.google\.com/@) { + # block this, you turkeys. + $user_agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.7)" . + " Gecko/20070914 Firefox/2.0.0.7"; - if (!$url_proto || !$serverstring) { - LOG (($verbose_net || $verbose_load), "unparsable URL: $url"); - return (); + # 28-Jun-2007: Google Images now emits the entire page in JS if + # you claim to be Gecko. They also still block "webcollage". + # They serve non-JS for unrecognised agents, so let's try this... 
+ $user_agent = "NoJavascriptPlease/1.0" + if ($url =~ m@^http://[a-z]+\.google\.com/@); } - my ($them,$port) = split(/:/, $serverstring); - $port = 80 unless $port; - - my $them2 = $them; - my $port2 = $port; - if ($http_proxy) { - $serverstring = $http_proxy if $http_proxy; - $serverstring =~ s@^[a-z]+://@@; - ($them2,$port2) = split(/:/, $serverstring); - $port2 = 80 unless $port2; - } + my $ua = LWP::UserAgent->new; + $ua->env_proxy(); + $ua->agent ("$progname/$version"); + $ua->default_header ('Referer' => $referer); + $ua->timeout($timeout) if $timeout; - my ($remote, $iaddr, $paddr, $proto, $line); - $remote = $them2; - if ($port2 =~ /\D/) { $port2 = getservbyname($port2, 'tcp') } - if (!$port2) { - LOG (($verbose_net || $verbose_load), "unrecognised port in $url"); - return (); + if ($verbose_http) { + LOG (1, " ==> GET $url"); + LOG (1, " ==> User-Agent: $user_agent"); + LOG (1, " ==> Referer: $referer") if $referer; } - $iaddr = inet_aton($remote); - if (!$iaddr) { - LOG (($verbose_net || $verbose_load), "host not found: $remote"); - return (); - } - $paddr = sockaddr_in($port2, $iaddr); - - - my $head = ""; - my $body = ""; - - @_ = - eval { - local $SIG{ALRM} = sub { - LOG (($verbose_net || $verbose_load), "timed out ($timeout) for $url"); - die "alarm\n"; - }; - alarm $timeout; - - $proto = getprotobyname('tcp'); - if (!socket(S, PF_INET, SOCK_STREAM, $proto)) { - LOG (($verbose_net || $verbose_load), "socket: $!"); - return (); - } - if (!connect(S, $paddr)) { - LOG (($verbose_net || $verbose_load), "connect($serverstring): $!"); - return (); - } - - select(S); $| = 1; select(STDOUT); - - my $cookie = $cookies{$them}; - - my $user_agent = "$progname/$version"; - - if ($url =~ m@^http://www\.altavista\.com/@ || - $url =~ m@^http://random\.yahoo\.com/@ || - $url =~ m@^http://images\.google\.com/@ || - $url =~ m@^http://www\.google\.com/@) { - # block this, you turkeys. - $user_agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.7)" . - " Gecko/20070914 Firefox/2.0.0.7"; - - # 28-Jun-2007: Google Images now emits the entire page in JS if - # you claim to be Gecko. They also still block "webcollage". - # They serve non-JS for unrecognised agents, so let's try this... - $user_agent = "NoJavascriptPlease/1.0" - if ($url =~ m@^http://[a-z]+\.google\.com/@); - } - my $hdrs = "GET " . ($http_proxy ? $url : "/$path") . " HTTP/1.0\r\n" . - "Host: $them\r\n" . - "User-Agent: $user_agent\r\n"; - if ($referer) { - $hdrs .= "Referer: $referer\r\n"; - } - if ($cookie) { - my @cc = split(/\r?\n/, $cookie); - $hdrs .= "Cookie: " . join('; ', @cc) . "\r\n"; - } - $hdrs .= "\r\n"; + my $res = $ua->get ($url); - foreach (split('\r?\n', $hdrs)) { - LOG ($verbose_http, " ==> $_"); - } - print S $hdrs; - my $http = || ""; + my $http = ($res ? $res->status_line : '') || ''; + my $head = ($res ? $res->headers()->as_string : '') || ''; + my $body = ($res && $res->is_success ? $res->decoded_content : '') || ''; - # Kludge: the Yahoo Random Link is now returning as its first - # line "Status: 301" instead of "HTTP/1.0 301 Found". Fix it... - # - $http =~ s@^Status:\s+(\d+)\b@HTTP/1.0 $1@i; + LOG ($verbose_net, "get_document_1 $url " . ($referer ? 
$referer : "")); - $_ = $http; - s/[\r\n]+$//s; + $head =~ s/\r\n/\n/gs; + $head =~ s/\r/\n/gs; + if ($verbose_http) { + foreach (split (/\n/, $head)) { LOG ($verbose_http, " <== $_"); - - while () { - $head .= $_; - s/[\r\n]+$//s; - last if m@^$@; - LOG ($verbose_http, " <== $_"); - - if (m@^Set-cookie:\s*([^;\r\n]+)@i) { - set_cookie($them, $1) - } - } - - my $lines = 0; - while () { - $body .= $_; - $lines++; - } - - LOG ($verbose_http, - " <== [ body ]: $lines lines, " . length($body) . " bytes"); - - close S; - - if (!$http) { - LOG (($verbose_net || $verbose_load), "null response: $url"); - return (); - } - - $SIG{ALRM} = 'DEFAULT'; # seem to be suffering a race? - return ( $http, $head, $body ); - }; - die if ($@ && $@ ne "alarm\n"); # propagate errors - - if ($@ && $@ ne "alarm\n") { - print STDERR blurb() . "DIE " . join(" ", $@) . "\n"; - die; + } } - if ($@) { - # timed out - $head = undef; - $body = undef; - $suppress_audit = 1; + my @L = split(/\r\n|\r|\n/, $body); + my $lines = @L; + LOG ($verbose_http, + " <== [ body ]: $lines lines, " . length($body) . " bytes"); + + if (!$http) { + LOG (($verbose_net || $verbose_load), "null response: $url"); return (); - } else { - # didn't - alarm 0; - return @_; } + + return ( $http, $head, $body ); } @@ -718,30 +641,39 @@ sub pick_image_from_body($$) { my %unique_urls; foreach (split(/ * 1000) { - LOG (($verbose_filter || $verbose_load), - "excessive keywords ($L bytes) in $url: rejecting."); - $rejected_urls{$url} = $L; - $body = undef; - $_ = undef; - return (); - } else { - LOG ($verbose_filter, " keywords ($L bytes) in $url (ok)"); - } + my $L = length($_); + if ($L > 1000) { + LOG (($verbose_filter || $verbose_load), + "excessive keywords ($L bytes) in $url: rejecting."); + $rejected_urls{$url} = $L; + $body = undef; + $_ = undef; + return (); + } else { + LOG ($verbose_filter, " keywords ($L bytes) in $url (ok)"); } - } elsif ( m/^(img|a) .*(src|href) ?= ?\"? ?(.*?)[ >\"]/io ) { + } elsif (m/^ (IMG|A) \b .* (SRC|HREF) \s* = \s* ["']? (.*?) [ "'<>] /six || + m/^ (LINK|META) \b .* (REL|PROPERTY) \s* = \s* + ["']? (image_src|og:image) ["']? /six) { - my $was_inline = (! ( "$1" eq "a" || "$1" eq "A" )); + my $was_inline = (lc($1) eq 'img'); + my $was_meta = (lc($1) eq 'link' || lc($1) eq 'meta'); my $link = $3; + + # For + # and + # + if ($was_meta) { + next unless (m/ (HREF|CONTENT) \s* = \s* ["']? (.*?) [ "'<>] /six); + $link = $2; + } + my ( $width ) = m/width ?=[ \"]*(\d+)/oi; my ( $height ) = m/height ?=[ \"]*(\d+)/oi; $_ = $link; @@ -813,20 +745,24 @@ sub pick_image_from_body($$) { LOG ($verbose_filter, " image $url" . ($width && $height ? " (${width}x${height})" : "") . - ($was_inline ? " (inline)" : "")); + ($was_meta ? " (meta)" : $was_inline ? " (inline)" : "")); - $urls[++$#urls] = $url; - $unique_urls{$url}++; - # JPEGs are preferable to GIFs and PNGs. - $_ = $url; - if ( ! m@[.](gif|png)$@io ) { - $urls[++$#urls] = $url; + my $weight = 1; + + if ($was_meta) { + $weight = 20; # meta tag images are far preferable to inline images. + } else { + if ($url !~ m@[.](gif|png)$@io ) { + $weight += 2; # JPEGs are preferable to GIFs and PNGs. + } + if (! $was_inline) { + $weight += 4; # pointers to images are preferable to inlined images. + } } - # pointers to images are preferable to inlined images. - if ( ! 
$was_inline ) { - $urls[++$#urls] = $url; + $unique_urls{$url}++; + for (my $i = 0; $i < $weight; $i++) { $urls[++$#urls] = $url; } } @@ -853,6 +789,7 @@ sub pick_image_from_body($$) { return $url; } + # Given a URL and the RSS feed from that URL, pick a random image from # the feed. This is a lot simpler than extracting images out of a page: # we already know we have reasonable images, so we just pick one. @@ -905,12 +842,9 @@ sub pick_dictionary() { # sub random_word() { - local *IN; - if (! open (IN, "<$wordlist")) { - return undef; - } + return undef unless open (my $in, '<', $wordlist); - my $size = (stat(IN))[7]; + my $size = (stat($in))[7]; my $word = undef; my $count = 0; @@ -919,9 +853,9 @@ sub random_word() { if (++$count > 100); my $pos = int (rand ($size)); - if (seek (IN, $pos, 0)) { - $word = ; # toss partial line - $word = ; # keep next line + if (seek ($in, $pos, 0)) { + $word = <$in>; # toss partial line + $word = <$in>; # keep next line } next unless ($word); @@ -945,7 +879,7 @@ sub random_word() { last if ($word); } - close (IN); + close ($in); if ( $word =~ s/\s/\+/gs ) { # convert intra-word spaces to "+". $word = "\%22$word\%22"; # And put quotes (%22) around it. @@ -956,13 +890,12 @@ sub random_word() { sub random_words($) { - my ($or_p) = @_; - my $sep = ($or_p ? "%20OR%20" : "%20"); - return (random_word . $sep . - random_word . $sep . - random_word . $sep . - random_word . $sep . - random_word); + my ($sep) = @_; + return (random_word() . $sep . + random_word() . $sep . + random_word() . $sep . + random_word() . $sep . + random_word()); } @@ -1033,6 +966,26 @@ sub pick_from_search_engine($$$) { my @subpages; + if ($body =~ m/^\{\"/s) { # Google AJAX JSON response. + + my @chunks = split (/"GsearchResultClass"/, $body); + shift @chunks; + my $body2 = ''; + my $n = 1; + foreach (@chunks) { + my ($img) = m/"unescapedUrl":"(.*?)"/si; + my ($url) = m/"originalContextUrl":"(.*?)"/si; + next unless ($img && $url); + $url = ("/imgres" . + "?imgurl=" . url_quote($img) . + "&imgrefurl=" . url_quote($url) . + "&..."); + $body2 .= "$n\n"; + $n++; + } + $body = $body2 if $body2; + } + my $search_count = "?"; if ($body =~ m@found (approximately |about )?()?(\d+)()? image@) { $search_count = $3; @@ -1062,11 +1015,10 @@ sub pick_from_search_engine($$$) { 1 while ($search_count =~ s/^(\d+)(\d{3})/$1,$2/); # if ($search_count eq "?" || $search_count eq "0") { -# local *OUT; # my $file = "/tmp/wc.html"; -# open(OUT, ">$file") || error ("writing $file: $!"); -# print OUT $body; -# close OUT; +# open (my $out, '>', $file) || error ("writing $file: $!"); +# print $out $body; +# close $out; # print STDERR blurb() . "###### wrote $file\n"; # } @@ -1085,8 +1037,15 @@ sub pick_from_search_engine($$$) { my ($u) = m@]+)>@i; next unless $u; - if ($u =~ m/^\"([^\"]*)\"/) { $u = $1; } # quoted string - elsif ($u =~ m/^([^\s]*)\s/) { $u = $1; } # or token + if (m/\bm="{(.*?)}"/s) { # Bing info is inside JSON crud + my $json = html_unquote($1); + my ($href) = ($json =~ m/\bsurl:"(.*?)"/s); + my ($img) = ($json =~ m/\bimgurl:"(.*?)"/s); + $u = "$img\t$href" if ($img && $href); + + } elsif ($u =~ m/^\"([^\"]*)\"/) { $u = $1 # quoted string + } elsif ($u =~ m/^([^\s]*)\s/) { $u = $1; # or token + } if ( $rejected_urls{$u} ) { LOG ($verbose_filter, " pre-rejecting candidate: $u"); @@ -1383,11 +1342,10 @@ sub pick_from_security_camera($) { ############################################################################ -my $google_images_url = "http://images.google.com/images" . - "?site=images" . 
# photos - "&btnG=Search" . # graphics - "&safe=off" . # no screening - "&imgsafe=off" . +my $google_images_url = "http://ajax.googleapis.com/ajax/services/" . + "search/images" . + "?v=1.0" . + "&rsz=large" . "&q="; # googleimgs @@ -1395,17 +1353,11 @@ sub pick_from_google_images($;$$) { my ($timeout, $words, $max_page) = @_; if (!defined($words)) { - $words = random_word; # only one word for Google + $words = random_word(); # only one word for Google } - my $page = (int(rand(9)) + 1); - my $num = 20; # 20 images per page - my $search_url = $google_images_url . $words; - - if ($page > 1) { - $search_url .= "&start=" . $page*$num; # page number - $search_url .= "&num=" . $num; #images per page - } + my $off = int(rand(40)); + my $search_url = $google_images_url . $words . "&start=" . $off; my ($search_hit_count, @subpages) = pick_from_search_engine ($timeout, $search_url, $words); @@ -1416,10 +1368,14 @@ sub pick_from_google_images($;$$) { next unless ($u =~ m@imgres\?imgurl@i); # All pics start with this next if ($u =~ m@[/.]google\.com\b@i); # skip google builtins - if ($u =~ m@^/imgres\?imgurl=(.*?)\&imgrefurl=(.*?)\&@) { + $u = html_unquote($u); + if ($u =~ m@^/imgres\?imgurl=(.*?)&imgrefurl=(.*?)\&@) { my $ref = $2; my $img = $1; - $img = "http://$img" unless ($img =~ m/^http:/i); + $ref = url_decode($ref); + $img = url_decode($img); + + $img = "http://$img" unless ($img =~ m/^https?:/i); LOG ($verbose_filter, " candidate: $ref"); push @candidates, $img; @@ -1523,12 +1479,99 @@ sub pick_from_google_image_photos($) { my $i = int(rand($#photomakers + 1)); my $fn = $photomakers[$i]; my $file = &$fn; - my $words .= $file . "%20filetype:jpg"; + #$file .= "%20filetype:jpg"; + + pick_from_google_images ($timeout, $file); +} + + +############################################################################ +# +# Pick images by feeding random words into Google Image Search. +# By the way: fuck Microsoft. +# +############################################################################ + +my $bing_images_url = "http://www.bing.com/images/async" . + "?CW=0" . + "&CH=0" . + "&q="; + + +# bingimgs +sub pick_from_bing_images($;$$) { + my ($timeout, $words, $max_page) = @_; + + if (!defined($words)) { + $words = random_word(); # only one word for Bing + } + + my $off = int(rand(300)); + my $search_url = $bing_images_url . $words . "&first=" . $off; + + my ($search_hit_count, @subpages) = + pick_from_search_engine ($timeout, $search_url, $words); + + my @candidates = (); + my %referers; + foreach my $u (@subpages) { + my ($img, $ref) = ($u =~ m/^(.*?)\t(.*)$/s); + next unless $img; + LOG ($verbose_filter, " candidate: $ref"); + push @candidates, $img; + $referers{$img} = $ref; + } + + @candidates = depoison (@candidates); + return () if ($#candidates < 0); + my $i = int(rand($#candidates+1)); + my $img = $candidates[$i]; + my $ref = $referers{$img}; - pick_from_google_images ($timeout, $words); + LOG ($verbose_load, "picked image " . ($i+1) . ": $img (on $ref)"); + return ($ref, $img); } + + +############################################################################ +# +# Pick images by feeding random numbers into Bing Image Search. 
+# +############################################################################ + +# bingnums +sub pick_from_bing_image_numbers($) { + my ($timeout) = @_; + + my $max = 9999; + my $number = int(rand($max)); + + $number = sprintf("%04d", $number) + if (rand() < 0.3); + + pick_from_bing_images ($timeout, "$number"); +} + + +############################################################################ +# +# Pick images by feeding random numbers into Bing Image Search. +# +############################################################################ + +# bingphotos +sub pick_from_bing_image_photos($) { + my ($timeout) = @_; + + my $i = int(rand($#photomakers + 1)); + my $fn = $photomakers[$i]; + my $file = &$fn; + + pick_from_bing_images ($timeout, $file); +} + ############################################################################ # @@ -1549,7 +1592,7 @@ my $alta_vista_url = "http://www.altavista.com/web/results" . sub pick_from_alta_vista_text($) { my ($timeout) = @_; - my $words = random_words(0); + my $words = random_words('%20'); my $page = (int(rand(9)) + 1); my $search_url = $alta_vista_url . $words; @@ -1872,28 +1915,28 @@ sub pick_from_ircimages($) { ############################################################################ # -# Pick images from Twitter's list of recently-posted images. +# Pick images from Twitpic's list of recently-posted images. # ############################################################################ -my $twitter_img_url = "http://twitpic.com/public_timeline/feed.rss"; +my $twitpic_img_url = "http://twitpic.com/public_timeline/feed.rss"; # With most of our image sources, we get a random page and then select -# from the images on it. However, in the case of Twitter, the page +# from the images on it. However, in the case of Twitpic, the page # of images tends to update slowly; so we'll remember the last N entries # on it and randomly select from those, to get a wider variety each time. -my $twit_cache_size = 1000; -my @twit_cache = (); # fifo, for ordering by age -my %twit_cache = (); # hash, for detecting dups +my $twitpic_cache_size = 1000; +my @twitpic_cache = (); # fifo, for ordering by age +my %twitpic_cache = (); # hash, for detecting dups -# twitter -sub pick_from_twitter_images($) { +# twitpic +sub pick_from_twitpic_images($) { my ($timeout) = @_; - $last_search = $twitter_img_url; # for warnings + $last_search = $twitpic_img_url; # for warnings - my ( $base, $body ) = get_document ($twitter_img_url, undef, $timeout); + my ( $base, $body ) = get_document ($twitpic_img_url, undef, $timeout); # Update the cache. @@ -1910,37 +1953,38 @@ sub pick_from_twitter_images($) { $page =~ s@/$@@s; $page .= '/full'; - next if ($twit_cache{$page}); # already have it + next if ($twitpic_cache{$page}); # already have it LOG ($verbose_filter, " candidate: $page"); - push @twit_cache, $page; - $twit_cache{$page} = $page; + push @twitpic_cache, $page; + $twitpic_cache{$page} = $page; } } # Pull from the cache. - return () if ($#twit_cache == -1); + return () if ($#twitpic_cache == -1); - my $n = $#twit_cache+1; + my $n = $#twitpic_cache+1; my $i = int(rand($n)); - my $page = $twit_cache[$i]; + my $page = $twitpic_cache[$i]; - # delete this one from @twit_cache and from %twit_cache. + # delete this one from @twitpic_cache and from %twitpic_cache. # - @twit_cache = ( @twit_cache[0 .. $i-1], - @twit_cache[$i+1 .. $#twit_cache] ); - delete $twit_cache{$page}; + @twitpic_cache = ( @twitpic_cache[0 .. $i-1], + @twitpic_cache[$i+1 .. 
$#twitpic_cache] ); + delete $twitpic_cache{$page}; # Keep the size of the cache under the limit by nuking older entries # - while ($#twit_cache >= $twit_cache_size) { - my $page = shift @twit_cache; - delete $twit_cache{$page}; + while ($#twitpic_cache >= $twitpic_cache_size) { + my $page = shift @twitpic_cache; + delete $twitpic_cache{$page}; } ( $base, $body ) = get_document ($page, undef, $timeout); my $img = undef; + $body = '' unless defined($body); foreach (split (/= $twitter_cache_size) { + my $page = shift @twitter_cache; + delete $twitter_cache{$page}; + } + + LOG ($verbose_load, "picked page $url"); + + $suppress_audit = 1; + + return ($page, $url); +} + ############################################################################ # @@ -1975,7 +2108,7 @@ sub pick_from_twitter_images($) { # ############################################################################ -my $flickr_img_url = "http://www.flickr.com/photos/"; +my $flickr_img_url = "http://www.flickr.com/explore/"; # Like LiveJournal, the Flickr page of images tends to update slowly, # so remember the last N entries on it and randomly select from those. @@ -2006,13 +2139,15 @@ sub pick_from_flickr_recent($) { my $count = 0; my $count2 = 0; foreach (split (/\n/, $body)) { + my ($page, $thumb) = m@]* \b HREF=\"([^<>\"]+)\" [^<>]* > \s* - ]* \b SRC=\"([^<>\"]+)\" @xsi; + ]* \b + data-defer-src = \"([^<>\"]+)\" @xsi; next unless defined ($thumb); $page = html_unquote ($page); $thumb = html_unquote ($thumb); - next unless ($thumb =~ m@^http://farm\d*\.static\.flickr\.com/@); + next unless ($thumb =~ m@^http://farm\d*\.static\.?flickr\.com/@); my $base = "http://www.flickr.com/"; $page =~ s@^/@$base@; @@ -2063,8 +2198,8 @@ sub pick_from_flickr_recent($) { # ############################################################################ -my $flickr_rss_base = ("http://www.flickr.com/services/feeds/photos_public.gne" . - "?format=rss_200_enc&tags="); +my $flickr_rss_base = ("http://www.flickr.com/services/feeds/photos_public.gne". + "?format=rss_200_enc&tagmode=any&tags="); # Picks a random RSS feed; picks a random image from that feed; # returns 2 URLs: the page containing the image, and the image. @@ -2074,10 +2209,15 @@ my $flickr_rss_base = ("http://www.flickr.com/services/feeds/photos_public.gne" sub pick_from_flickr_random($) { my $timeout = shift; - my $rss = $flickr_rss_base . random_word(); + my $words = random_words(','); + my $rss = $flickr_rss_base . $words; $last_search = $rss; + $_ = $words; + s/,/ /g; + print STDERR "\n\n" if ($verbose_load); + LOG ($verbose_load, "words: $_"); LOG ($verbose_load, "URL: $last_search"); $suppress_audit = 1; @@ -2097,6 +2237,56 @@ sub pick_from_flickr_random($) { return ($base, $img); } + +############################################################################ +# +# Pick random images from Instagram, via gramfeed.com's key. +# +############################################################################ + +my $instagram_url_base = "https://api.instagram.com/v1/media/popular" . 
+ "?client_id=b59fbe4563944b6c88cced13495c0f49"; + +# instagram_random +sub pick_from_instagram($) { + my $timeout = shift; + + $last_search = $instagram_url_base; + + print STDERR "\n\n" if ($verbose_load); + LOG ($verbose_load, "URL: $last_search"); + + my ( $base, $body ) = get_document ($last_search, undef, $timeout); + if (!$base || !$body) { + $body = undef; + return; + } + + $body =~ s/("link")/\001$1/gs; + my @chunks = split(/\001/, $body); + shift @chunks; + my @urls = (); + foreach (@chunks) { + s/\\//gs; + my ($url) = m/"link":\s*"(.*?)"/s; + my ($img) = m/"standard_resolution":{"url":\s*"(.*?)"/s; + ($img) = m/"url":\s*"(.*?)"/s unless $url; + next unless ($url && $img); + push @urls, [ $url, $img ]; + } + + if ($#urls < 0) { + LOG ($verbose_load, "no images on $last_search"); + return (); + } + + my $i = int(rand($#urls+1)); + my ($url, $img) = @{$urls[$i]}; + + LOG ($verbose_load, "picked image " .($i+1) . "/" . ($#urls+1) . ": $url"); + return ($url, $img); +} + ############################################################################ # @@ -2126,16 +2316,15 @@ sub pick_from_driftnet($) { $last_search = $id; while ($now = time, $now < $start + $timeout) { - local *DIR; - opendir (DIR, $dir) || error ("$dir: $!"); - while (my $file = readdir(DIR)) { + opendir (my $dir, $dir) || error ("$dir: $!"); + while (my $file = readdir($dir)) { next if ($file =~ m/^\./); $file = "$dir/$file"; - closedir DIR; + closedir ($dir); LOG ($verbose_load, "picked file $file ($id)"); return ($id, $file); } - closedir DIR; + closedir ($dir); } LOG (($verbose_net || $verbose_load), "timed out for $id"); return (); @@ -2148,15 +2337,14 @@ sub get_driftnet_file($) { error ("\$driftnet_dir unset?") unless ($driftnet_dir); my $id = $driftnet_magic; - my $re = qr/$driftnet_dir/; error ("$id: $file not in $driftnet_dir?") - unless ($file =~ m@^$re@o); + unless ($file =~ m@^\Q$driftnet_dir@o); - local *IN; - open (IN, $file) || error ("$id: $file: $!"); + open (my $in, '<', $file) || error ("$id: $file: $!"); my $body = ''; - while () { $body .= $_; } - close IN || error ("$id: $file: $!"); + local $/ = undef; # read entire file + $body = <$in>; + close ($in) || error ("$id: $file: $!"); unlink ($file) || error ("$id: $file: rm: $!"); return ($id, $body); } @@ -2205,8 +2393,8 @@ sub spawn_driftnet($) { } # local-directory -sub pick_from_local_dir { - my ( $timeout ) = @_; +sub pick_from_local_dir($) { + my ($timeout) = @_; my $id = $local_magic; $last_search = $id; @@ -2219,6 +2407,7 @@ sub pick_from_local_dir { my $v = ($verbose_exec ? 
"-v" : ""); my $pick = `xscreensaver-getimage-file $v "$dir"`; + $pick =~ s/\s+$//s; $pick = "$dir/$pick" unless ($pick =~ m@^/@s); # relative path LOG ($verbose_load, "picked file $pick ($id)"); @@ -2226,21 +2415,19 @@ sub pick_from_local_dir { } -sub get_local_file { +sub get_local_file($) { my ($file) = @_; error ("\$local_dir unset?") unless ($local_dir); my $id = $local_magic; - my $re = qr/$local_dir/; error ("$id: $file not in $local_dir?") - unless ($file =~ m@^$re@o); + unless ($file =~ m@^\Q$local_dir@o); - local *IN; - open (IN, $file) || error ("$id: $file: $!"); - my $body = ''; - while () { $body .= $_; } - close IN || error ("$id: $file: $!"); + open (my $in, '<', $file) || error ("$id: $file: $!"); + local $/ = undef; # read entire file + my $body = <$in>; + close ($in) || error ("$id: $file: $!"); return ($id, $body); } @@ -2829,12 +3016,12 @@ sub image_to_pnm($$$) { $body = undef; }; - if (($pid = open(PIPE, "| $cmd2 > $output"))) { + if (($pid = open (my $pipe, "| $cmd2 > $output"))) { $timed_out = 0; alarm $cvt_timeout; - print PIPE $body; + print $pipe $body; $body = undef; - close PIPE; + close $pipe; LOG ($verbose_exec, "awaiting $pid"); waitpid ($pid, 0); @@ -2889,10 +3076,9 @@ sub ppmmake($$$$) { my $pixel = pack('CCC', $r, $g, $b); my $bits = "P6\n$w $h\n255\n" . ($pixel x ($w * $h)); - local *OUT; - open (OUT, ">$outfile") || error ("$outfile: $!"); - print OUT $bits; - close OUT; + open (my $out, '>', $outfile) || error ("$outfile: $!"); + print $out $bits; + close $out; } @@ -3031,12 +3217,12 @@ sub x_or_pbm_output($) { my ($iw, $ih); my $body = ""; - local *IMG; - open(IMG, "<$bgimage") || error "couldn't open $bgimage: $!"; - my $cmd; - while () { $body .= $_; } - close (IMG); + open (my $imgf, '<', $bgimage) || error "couldn't open $bgimage: $!"; + local $/ = undef; # read entire file + $body = <$imgf>; + close ($imgf); + my $cmd; if ((@_ = gif_size ($body))) { ($iw, $ih) = @_; $cmd = "giftopnm |"; @@ -3064,10 +3250,10 @@ sub x_or_pbm_output($) { "pasting $bgimage (${iw}x$ih) into base image at $x,$y"); $cmd .= "pnmpaste - $x $y $image_ppm > $image_tmp1"; - open (IMG, "| $cmd") || error "running $cmd: $!"; - print IMG $body; + open ($imgf, "| $cmd") || error "running $cmd: $!"; + print $imgf $body; $body = undef; - close (IMG); + close ($imgf); LOG ($verbose_exec, "subproc exited normally."); rename ($image_tmp1, $image_ppm) || error "renaming $image_tmp1 to $image_ppm: $!"; @@ -3125,10 +3311,9 @@ sub paste_image($$$$) { return 0; } - local *OUT; - open (OUT, ">$image_tmp1") || error ("writing $image_tmp1: $!"); - print OUT $body || error ("writing $image_tmp1: $!"); - close OUT || error ("writing $image_tmp1: $!"); + open (my $out, '>', $image_tmp1) || error ("writing $image_tmp1: $!"); + (print $out $body) || error ("writing $image_tmp1: $!"); + close ($out) || error ("writing $image_tmp1: $!"); } else { ($iw, $ih) = image_to_pnm ($img, $body, $image_tmp1); @@ -3160,12 +3345,11 @@ sub paste_image($$$$) { rename ($image_tmp2, $image_tmp1); # re-get the width/height in case the filter resized it. - local *IMG; - open(IMG, "<$image_tmp1") || return 0; - $_ = ; - $_ = ; + open (my $imgf, '<', $image_tmp1) || return 0; + $_ = <$imgf>; + $_ = <$imgf>; ($iw, $ih) = m/^(\d+) (\d+)$/; - close (IMG); + close ($imgf); return 0 unless ($iw && $ih); } @@ -3432,6 +3616,8 @@ sub update_imagemap($$$$$$$$) { my $imagemap_html = $imagemap_base . ".html"; my $imagemap_jpg = $imagemap_base . 
".jpg"; + my $imagemap_jpg2 = $imagemap_jpg; + $imagemap_jpg2 =~ s@^.*/@@gs; if (!defined ($imagemap_html_tmp)) { $imagemap_html_tmp = $imagemap_html . sprintf (".%08x", rand(0xffffffff)); @@ -3442,16 +3628,16 @@ sub update_imagemap($$$$$$$$) { # my $template_html = ''; { - local *IN; - if (open (IN, "<$imagemap_html")) { - while () { $template_html .= $_; } - close IN; + if (open (my $in, '<', $imagemap_html)) { + local $/ = undef; # read entire file + $template_html = <$in>; + close $in; LOG ($verbose_pbm, "read template $imagemap_html"); } if ($template_html =~ m/^\s*$/s) { $template_html = ("\n" . - "\n"); LOG ($verbose_pbm, "created dummy template"); } @@ -3478,7 +3664,7 @@ sub update_imagemap($$$$$$$$) { my $body = $template_html; my $areas = join ("\n\t", @imagemap_areas); my $map = ("\n\t$areas\n"); - my $img = (""); @@ -3495,10 +3681,9 @@ sub update_imagemap($$$$$$$$) { $body =~ s@().*?()@$1$size$2@si; } - local *OUT; - open (OUT, ">$imagemap_html_tmp") || error ("$imagemap_html_tmp: $!"); - print OUT $body || error ("$imagemap_html_tmp: $!"); - close OUT || error ("$imagemap_html_tmp: $!"); + open (my $out, '>', $imagemap_html_tmp) || error ("$imagemap_html_tmp: $!"); + (print $out $body) || error ("$imagemap_html_tmp: $!"); + close ($out) || error ("$imagemap_html_tmp: $!"); LOG ($verbose_pbm, "wrote $imagemap_html_tmp"); }