#!/usr/bin/perl -w
#
-# webcollage, Copyright (c) 1999-2003 by Jamie Zawinski <jwz@jwz.org>
+# webcollage, Copyright © 1999-2015 by Jamie Zawinski <jwz@jwz.org>
# This program decorates the screen with random images from the web.
# One satisfied customer described it as "a nonstop pop culture brainbath."
#
# To run this as a display mode with xscreensaver, add this to `programs':
#
-# webcollage -root
-# webcollage -root -filter 'vidwhacker -stdin -stdout'
-
-
+# webcollage --root
+# webcollage --root --filter 'vidwhacker --stdin --stdout'
+#
+#
+# You can see this in action at http://www.jwz.org/webcollage/ --
+# it auto-reloads about once a minute. To make a page similar to
+# that on your own system, do this:
+#
+# webcollage --size '800x600' --imagemap $HOME/www/webcollage/index
+#
+#
# If you have the "driftnet" program installed, webcollage can display a
# collage of images sniffed off your local ethernet, instead of pulled out
# of search engines: in that way, your screensaver can display the images
# Driftnet is available here: http://www.ex-parrot.com/~chris/driftnet/
# Use it like so:
#
-# webcollage -root -driftnet
+# webcollage --root --driftnet
#
# Driftnet is the Unix implementation of the MacOS "EtherPEG" program.
#use diagnostics;
-use Socket;
require Time::Local;
require POSIX;
use Fcntl ':flock'; # import LOCK_* constants
use POSIX qw(strftime);
-
-use bytes; # Larry can take Unicode and shove it up his ass sideways.
- # Perl 5.8.0 causes us to start getting incomprehensible
- # errors about UTF-8 all over the place without this.
+use LWP::UserAgent;
+use bytes;
my $progname = $0; $progname =~ s@.*/@@g;
-my $version = q{ $Revision: 1.107 $ }; $version =~ s/^[^0-9]+([0-9.]+).*$/$1/;
-my $copyright = "WebCollage $version, Copyright (c) 1999-2002" .
+my ($version) = ('$Revision: 1.172 $' =~ m/\s(\d[.\d]+)\s/s);
+my $copyright = "WebCollage $version, Copyright (c) 1999-2015" .
" Jamie Zawinski <jwz\@jwz.org>\n" .
- " http://www.jwz.org/xscreensaver/\n";
+ " http://www.jwz.org/webcollage/\n";
-my @search_methods = ( 77, "altavista", \&pick_from_alta_vista_random_link,
- 14, "yahoorand", \&pick_from_yahoo_random_link,
- 9, "yahoonews", \&pick_from_yahoo_news_text,
+my @search_methods = (
+ # Google is rate-limiting us now, so this works ok from
+ # a short-running screen saver, but not as a batch job.
+ # I haven't found a workaround.
+ #
+ 7, "googlephotos", \&pick_from_google_image_photos,
+ 5, "googleimgs", \&pick_from_google_images,
+ 5, "googlenums", \&pick_from_google_image_numbers,
- # Alta Vista has a new "random link" URL now.
- # They added it specifically to better support webcollage!
- # That was super cool of them. This is how we used to do
- # it, before:
+ # So let's try Bing instead. No rate limiting yet!
+ #
+ 7, "bingphotos", \&pick_from_bing_image_photos,
+ 6, "bingimgs", \&pick_from_bing_images,
+ 6, "bingnums", \&pick_from_bing_image_numbers,
+
+ 21, "flickr_recent", \&pick_from_flickr_recent,
+ 16, "flickr_random", \&pick_from_flickr_random,
+ 23, "instagram", \&pick_from_instagram,
+ 4, "livejournal", \&pick_from_livejournal_images,
+
+ # No longer exists, as of Apr 2014
+ # 4, "yahoorand", \&pick_from_yahoo_random_link,
+
+ # Twitter destroyed their whole API in 2013.
+ # 0, "twitpic", \&pick_from_twitpic_images,
+ # 0, "twitter", \&pick_from_twitter_images,
+
+  # This is a cute way to search for certain webcams.
+ # Not included in default methods, since these images
+ # aren't terribly interesting by themselves.
+ # See also "SurveillanceSaver".
#
- # 0, "avimages", \&pick_from_alta_vista_images,
- # 0, "avtext", \&pick_from_alta_vista_text,
+ 0, "securitycam", \&pick_from_security_camera,
+
+ # Nonfunctional as of June 2011.
+ # 0, "altavista", \&pick_from_alta_vista_random_link,
- # Google asked (nicely) for me to stop searching them.
+ # In Apr 2002, Google asked me to stop searching them.
# I asked them to add a "random link" url. They said
# "that would be easy, we'll think about it" and then
- # never wrote back. Booo Google! Booooo!
+ # never wrote back. Booo Google! Booooo! So, screw
+ # those turkeys, I've turned Google searching back on.
+ # I'm sure they can take it. (Jan 2005.)
+
+ # Jan 2005: Yahoo fucked up their search form so that
+ # it's no longer possible to do "or" searches on news
+ # images, so we rarely get any hits there any more.
+ #
+ # 0, "yahoonews", \&pick_from_yahoo_news_text,
+
+ # Dec 2004: the ircimages guy's server can't take the
+ # heat, so he started banning the webcollage user agent.
+ # I tried to convince him to add a lighter-weight page to
+ # support webcollage better, but he doesn't care.
#
- # 0, "googlenums", \&pick_from_google_image_numbers,
- # 0, "googleimgs", \&pick_from_google_images,
+ # 0, "ircimages", \&pick_from_ircimages,
- # I suspect Hotbot is actually the same search engine
- # data as Lycos.
+ # Dec 2002: Alta Vista has a new "random link" URL now.
+ # They added it specifically to better support webcollage!
+ # That was super cool of them. This is how we used to do
+ # it, before:
+ #
+ # 0, "avimages", \&pick_from_alta_vista_images,
+ # 0, "avtext", \&pick_from_alta_vista_text,
+
+ # This broke in 2004. Eh, Lycos sucks anyway.
#
- # 0, "hotbot", \&pick_from_hotbot_text,
+ # 0, "lycos", \&pick_from_lycos_text,
- # Eh, Lycos sucks anyway.
- # 0, "lycos", \&pick_from_lycos_text,
+ # This broke in 2003, I think. I suspect Hotbot is
+ # actually the same search engine data as Lycos.
+ #
+ # 0, "hotbot", \&pick_from_hotbot_text,
);
# programs we can use to write to the root window (tried in ascending order.)
"www.nytimes.com" => 'NYT-S=18cHMIlJOn2Y1bu5xvEG3Ufuk6E1oJ.' .
'FMxWaQV0igaB5Yi/Q/guDnLeoL.pe7i1oakSb' .
'/VqfdUdb2Uo27Vzt1jmPn3cpYRlTw9',
+
+ "ircimages.com" => 'disclaimer=1',
);
# (I don't see how they did it, though!)
"alwayshotels.com" => 1, # Poisoned Lycos pretty heavily.
"nextag.com" => 1, # Poisoned Alta Vista real good.
+ "ghettodriveby.com" => 1, # Poisoned Google Images.
+ "crosswordsolver.org" => 1, # Poisoned Google Images.
+ "xona.com" => 1, # Poisoned Google Images.
+ "freepatentsonline.com" => 1, # Poisoned Google Images.
+ "herbdatanz.com" => 1, # Poisoned Google Images.
);
# site" diagnostic message.
#
my %warningless_sites = (
- "home.earthlink.net" => 1, # Lots of home pages here.
- "www.geocities.com" => 1,
+ "home.earthlink.net" => 1,
"www.angelfire.com" => 1,
"members.aol.com" => 1,
+ "img.photobucket.com" => 1,
+ "pics.livejournal.com" => 1,
+ "tinypic.com" => 1,
+ "flickr.com" => 1,
+ "staticflickr.com" => 1,
+ "pbase.com" => 1,
+ "blogger.com" => 1,
+ "multiply.com" => 1,
+ "wikimedia.org" => 1,
+ "twitpic.com" => 1,
+ "amazonaws.com" => 1,
+ "blogspot.com" => 1,
+ "photoshelter.com" => 1,
+ "myspacecdn.com" => 1,
+ "feedburner.com" => 1,
+ "wikia.com" => 1,
+ "ljplus.ru" => 1,
+ "yandex.ru" => 1,
+ "imgur.com" => 1,
+ "yfrog.com" => 1,
+ "cdninstagram.com" => 1,
"yimg.com" => 1, # This is where dailynews.yahoo.com stores
"eimg.com" => 1, # its images, so pick_from_yahoo_news_text()
# hits this every time.
+ "images.quizfarm.com" => 1, # damn those LJ quizzes...
+ "images.quizilla.com" => 1,
+ "images.quizdiva.net" => 1,
+
"driftnet" => 1, # builtin...
+ "local-directory" => 1, # builtin...
+);
+
+
+# For decoding HTML-encoded character entities to URLs.
+#
+my %entity_table = (
+ "apos" => '\'',
+ "quot" => '"', "amp" => '&', "lt" => '<',
+   "gt"     => '>',     "nbsp"   => ' ',     "iexcl"  => "\xA1",
+ "cent" => "\xA2", "pound" => "\xA3", "curren" => "\xA4",
+ "yen" => "\xA5", "brvbar" => "\xA6", "sect" => "\xA7",
+ "uml" => "\xA8", "copy" => "\xA9", "ordf" => "\xAA",
+ "laquo" => "\xAB", "not" => "\xAC", "shy" => "\xAD",
+ "reg" => "\xAE", "macr" => "\xAF", "deg" => "\xB0",
+ "plusmn" => "\xB1", "sup2" => "\xB2", "sup3" => "\xB3",
+ "acute" => "\xB4", "micro" => "\xB5", "para" => "\xB6",
+ "middot" => "\xB7", "cedil" => "\xB8", "sup1" => "\xB9",
+ "ordm" => "\xBA", "raquo" => "\xBB", "frac14" => "\xBC",
+ "frac12" => "\xBD", "frac34" => "\xBE", "iquest" => "\xBF",
+ "Agrave" => "\xC0", "Aacute" => "\xC1", "Acirc" => "\xC2",
+ "Atilde" => "\xC3", "Auml" => "\xC4", "Aring" => "\xC5",
+ "AElig" => "\xC6", "Ccedil" => "\xC7", "Egrave" => "\xC8",
+ "Eacute" => "\xC9", "Ecirc" => "\xCA", "Euml" => "\xCB",
+ "Igrave" => "\xCC", "Iacute" => "\xCD", "Icirc" => "\xCE",
+ "Iuml" => "\xCF", "ETH" => "\xD0", "Ntilde" => "\xD1",
+ "Ograve" => "\xD2", "Oacute" => "\xD3", "Ocirc" => "\xD4",
+ "Otilde" => "\xD5", "Ouml" => "\xD6", "times" => "\xD7",
+ "Oslash" => "\xD8", "Ugrave" => "\xD9", "Uacute" => "\xDA",
+ "Ucirc" => "\xDB", "Uuml" => "\xDC", "Yacute" => "\xDD",
+ "THORN" => "\xDE", "szlig" => "\xDF", "agrave" => "\xE0",
+ "aacute" => "\xE1", "acirc" => "\xE2", "atilde" => "\xE3",
+ "auml" => "\xE4", "aring" => "\xE5", "aelig" => "\xE6",
+ "ccedil" => "\xE7", "egrave" => "\xE8", "eacute" => "\xE9",
+ "ecirc" => "\xEA", "euml" => "\xEB", "igrave" => "\xEC",
+ "iacute" => "\xED", "icirc" => "\xEE", "iuml" => "\xEF",
+ "eth" => "\xF0", "ntilde" => "\xF1", "ograve" => "\xF2",
+ "oacute" => "\xF3", "ocirc" => "\xF4", "otilde" => "\xF5",
+ "ouml" => "\xF6", "divide" => "\xF7", "oslash" => "\xF8",
+ "ugrave" => "\xF9", "uacute" => "\xFA", "ucirc" => "\xFB",
+ "uuml" => "\xFC", "yacute" => "\xFD", "thorn" => "\xFE",
+ "yuml" => "\xFF",
+
+ # HTML 4 entities that do not have 1:1 Latin1 mappings.
+ "bull" => "*", "hellip"=> "...", "prime" => "'", "Prime" => "\"",
+ "frasl" => "/", "trade" => "[tm]", "larr" => "<-", "rarr" => "->",
+ "harr" => "<->", "lArr" => "<=", "rArr" => "=>", "hArr" => "<=>",
+ "empty" => "\xD8", "minus" => "-", "lowast"=> "*", "sim" => "~",
+ "cong" => "=~", "asymp" => "~", "ne" => "!=", "equiv" => "==",
+ "le" => "<=", "ge" => ">=", "lang" => "<", "rang" => ">",
+ "loz" => "<>", "OElig" => "OE", "oelig" => "oe", "Yuml" => "Y",
+ "circ" => "^", "tilde" => "~", "ensp" => " ", "emsp" => " ",
+ "thinsp"=> " ", "ndash" => "-", "mdash" => "--", "lsquo" => "`",
+ "rsquo" => "'", "sbquo" => "'", "ldquo" => "\"", "rdquo" => "\"",
+ "bdquo" => "\"", "lsaquo"=> "<", "rsaquo"=> ">",
);
my $report_performance_interval = 60 * 15; # print some stats every 15 minutes
my $http_proxy = undef;
-my $http_timeout = 30;
+my $http_timeout = 20;
my $cvt_timeout = 10;
my $min_width = 50;
my $no_output_p = 0;
my $urls_only_p = 0;
+my $cocoa_p = 0;
+my $imagemap_base = undef;
my @pids_to_kill = (); # forked pids we should kill when we exit, if any.
my $driftnet_dir = undef;
my $default_driftnet_cmd = "driftnet -a -m 100";
+my $local_magic = 'local-directory';
+my $local_dir = undef;
+
my $wordlist;
my %rejected_urls;
# returns three values: the HTTP response line; the document headers;
# and the document body.
#
-sub get_document_1 {
- my ( $url, $referer, $timeout ) = @_;
+sub get_document_1($$$) {
+ my ($url, $referer, $timeout) = @_;
if (!defined($timeout)) { $timeout = $http_timeout; }
if ($timeout > $http_timeout) { $timeout = $http_timeout; }
- if ($timeout <= 0) {
- LOG (($verbose_net || $verbose_load), "timed out for $url");
- return ();
- }
+ my $user_agent = "$progname/$version";
+
+ if ($url =~ m@^https?://www\.altavista\.com/@s ||
+ $url =~ m@^https?://random\.yahoo\.com/@s ||
+ $url =~ m@^https?://[^./]+\.google\.com/@s ||
+ $url =~ m@^https?://www\.livejournal\.com/@s) {
+ # block this, you turkeys.
+ $user_agent = 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.7)' .
+ ' Gecko/20070914 Firefox/2.0.0.7';
+ }
+
+ my $ua = LWP::UserAgent->new ( agent => $user_agent,
+ keep_alive => 0,
+ env_proxy => 0,
+ );
+ $ua->proxy ('http', $http_proxy) if $http_proxy;
+ $ua->default_header ('Referer' => $referer) if $referer;
+ $ua->default_header ('Accept' => '*/*');
+ $ua->timeout($timeout) if $timeout;
+
+ if (0) {
+ $ua->add_handler ("request_send",
+ sub($$$) {
+ my ($req, $ua, $h) = @_;
+ print "\n>>[[\n"; $req->dump; print "\n]]\n";
+ return;
+ });
+ $ua->add_handler ("response_data",
+ sub($$$$) {
+ my ($req, $ua, $h, $data) = @_;
+ #print "\n<<[[\n"; print $data; print "\n]]\n";
+ return 1;
+ });
+ $ua->add_handler ("request_done",
+ sub($$$) {
+ my ($req, $ua, $h) = @_;
+ print "\n<<[[\n"; $req->dump; print "\n]]\n";
+ return;
+ });
+ }
+
+ if ($verbose_http) {
+ LOG (1, " ==> GET $url");
+ LOG (1, " ==> User-Agent: $user_agent");
+ LOG (1, " ==> Referer: $referer") if $referer;
+ }
+
+ my $res = $ua->get ($url);
+
+ my $http = ($res ? $res->status_line : '') || '';
+ my $head = ($res ? $res->headers() : '') || '';
+ $head = $head->as_string() if $head;
+ my $body = ($res && $res->is_success ? $res->decoded_content : '') || '';
LOG ($verbose_net, "get_document_1 $url " . ($referer ? $referer : ""));
- if (! ($url =~ m@^http://@i)) {
- LOG ($verbose_net, "not an HTTP URL: $url");
- return ();
+ $head =~ s/\r\n/\n/gs;
+ $head =~ s/\r/\n/gs;
+ if ($verbose_http) {
+ foreach (split (/\n/, $head)) {
+ LOG ($verbose_http, " <== $_");
+ }
}
- my ($url_proto, $dummy, $serverstring, $path) = split(/\//, $url, 4);
- $path = "" unless $path;
-
- my ($them,$port) = split(/:/, $serverstring);
- $port = 80 unless $port;
-
- my $them2 = $them;
- my $port2 = $port;
- if ($http_proxy) {
- $serverstring = $http_proxy if $http_proxy;
- $serverstring =~ s@^[a-z]+://@@;
- ($them2,$port2) = split(/:/, $serverstring);
- $port2 = 80 unless $port2;
- }
+ my @L = split(/\r\n|\r|\n/, $body);
+ my $lines = @L;
+ LOG ($verbose_http,
+ " <== [ body ]: $lines lines, " . length($body) . " bytes");
- my ($remote, $iaddr, $paddr, $proto, $line);
- $remote = $them2;
- if ($port2 =~ /\D/) { $port2 = getservbyname($port2, 'tcp') }
- if (!$port2) {
- LOG (($verbose_net || $verbose_load), "unrecognised port in $url");
- return ();
- }
- $iaddr = inet_aton($remote);
- if (!$iaddr) {
- LOG (($verbose_net || $verbose_load), "host not found: $remote");
+ if (!$http) {
+ LOG (($verbose_net || $verbose_load), "null response: $url");
return ();
}
- $paddr = sockaddr_in($port2, $iaddr);
-
-
- my $head = "";
- my $body = "";
-
- @_ =
- eval {
- local $SIG{ALRM} = sub {
- LOG (($verbose_net || $verbose_load), "timed out ($timeout) for $url");
- die "alarm\n";
- };
- alarm $timeout;
-
- $proto = getprotobyname('tcp');
- if (!socket(S, PF_INET, SOCK_STREAM, $proto)) {
- LOG (($verbose_net || $verbose_load), "socket: $!");
- return ();
- }
- if (!connect(S, $paddr)) {
- LOG (($verbose_net || $verbose_load), "connect($serverstring): $!");
- return ();
- }
-
- select(S); $| = 1; select(STDOUT);
-
- my $cookie = $cookies{$them};
-
- my $user_agent = "$progname/$version";
-
- if ($url =~ m@^http://www\.altavista\.com/@ ||
- $url =~ m@^http://random\.yahoo\.com/@) {
- # block this, you turkeys.
- $user_agent = "Mozilla/4.76 [en] (X11; U; Linux 2.2.16-22 i686; Nav)";
- }
-
- my $hdrs = "GET " . ($http_proxy ? $url : "/$path") . " HTTP/1.0\r\n" .
- "Host: $them\r\n" .
- "User-Agent: $user_agent\r\n";
- if ($referer) {
- $hdrs .= "Referer: $referer\r\n";
- }
- if ($cookie) {
- my @cc = split(/\r?\n/, $cookie);
- $hdrs .= "Cookie: " . join('; ', @cc) . "\r\n";
- }
- $hdrs .= "\r\n";
-
- foreach (split('\r?\n', $hdrs)) {
- LOG ($verbose_http, " ==> $_");
- }
- print S $hdrs;
- my $http = <S> || "";
-
- # Kludge: the Yahoo Random Link is now returning as its first
- # line "Status: 301" instead of "HTTP/1.0 301 Found". Fix it...
- #
- $http =~ s@^Status:\s+(\d+)\b@HTTP/1.0 $1@i;
-
- $_ = $http;
- s/[\r\n]+$//s;
- LOG ($verbose_http, " <== $_");
-
- while (<S>) {
- $head .= $_;
- s/[\r\n]+$//s;
- last if m@^$@;
- LOG ($verbose_http, " <== $_");
-
- if (m@^Set-cookie:\s*([^;\r\n]+)@i) {
- set_cookie($them, $1)
- }
- }
-
- my $lines = 0;
- while (<S>) {
- $body .= $_;
- $lines++;
- }
-
- LOG ($verbose_http,
- " <== [ body ]: $lines lines, " . length($body) . " bytes");
-
- close S;
-
- if (!$http) {
- LOG (($verbose_net || $verbose_load), "null response: $url");
- return ();
- }
- return ( $http, $head, $body );
- };
- die if ($@ && $@ ne "alarm\n"); # propagate errors
- if ($@) {
- # timed out
- $head = undef;
- $body = undef;
- $suppress_audit = 1;
- return ();
- } else {
- # didn't
- alarm 0;
- return @_;
- }
+ return ( $http, $head, $body );
}
# returns two values: the document headers; and the document body.
# if the given URL did a redirect, returns the redirected-to document.
#
-sub get_document {
- my ( $url, $referer, $timeout ) = @_;
+sub get_document($$;$) {
+ my ($url, $referer, $timeout) = @_;
my $start = time;
if (defined($referer) && $referer eq $driftnet_magic) {
return get_driftnet_file ($url);
}
+ if (defined($referer) && $referer eq $local_magic) {
+ return get_local_file ($url);
+ }
+
my $orig_url = $url;
my $loop_count = 0;
my $max_loop_count = 4;
$url = $location;
if ($url =~ m@^/@) {
- $referer =~ m@^(http://[^/]+)@i;
+ $referer =~ m@^(https?://[^/]+)@i;
$url = $1 . $url;
} elsif (! ($url =~ m@^[a-z]+:@i)) {
$_ = $referer;
- s@[^/]+$@@g if m@^http://[^/]+/@i;
- $_ .= "/" if m@^http://[^/]+$@i;
+ s@[^/]+$@@g if m@^https?://[^/]+/@i;
+ $_ .= "/" if m@^https?://[^/]+$@i;
$url = $_ . $url;
}
# in again, but you have to present the old cookie to get the new cookie.
# So, by doing this, the built-in cypherpunks cookie will never go "stale".
#
-sub set_cookie {
+sub set_cookie($$) {
my ($host, $cookie) = @_;
my $oc = $cookies{$host};
return unless $oc;
# given a URL and the body text at that URL, selects and returns a random
# image from it. returns () if no suitable images found.
#
-sub pick_image_from_body {
- my ( $url, $body ) = @_;
+sub pick_image_from_body($$) {
+ my ($url, $body) = @_;
my $base = $url;
$_ = $url;
# if there's at least one slash after the host, take off the last
# pathname component
- if ( m@^http://[^/]+/@io ) {
+ if ( m@^https?://[^/]+/@io ) {
$base =~ s@[^/]+$@@go;
}
# if there are no slashes after the host at all, put one on the end.
- if ( m@^http://[^/]+$@io ) {
+ if ( m@^https?://[^/]+$@io ) {
$base .= "/";
}
# randomly from the set of images on the web. All the logic here for
# rejecting some images is really a set of heuristics for rejecting
# images that are not really images: for rejecting *text* that is in
- # GIF/JPEG form. I don't want text, I want pictures, and I want the
- # content of the pictures to be randomly selected from among all the
- # available content.
+ # GIF/JPEG/PNG form. I don't want text, I want pictures, and I want
+ # the content of the pictures to be randomly selected from among all
+ # the available content.
#
# So, filtering out "dirty" pictures by looking for "dirty" keywords
# would be wrong: dirty pictures exist, like it or not, so webcollage
my %unique_urls;
foreach (split(/ *</)) {
- if ( m/^meta /i ) {
+ if ( m/^meta.*["']keywords["']/i ) {
# Likewise, reject any web pages that have a KEYWORDS meta tag
# that is too long.
#
- if (m/name ?= ?\"?keywords\"?/i &&
- m/content ?= ?\"([^\"]+)\"/) {
- my $L = length($1);
- if ($L > 1000) {
- LOG (($verbose_filter || $verbose_load),
- "excessive keywords ($L bytes) in $url: rejecting.");
- $rejected_urls{$url} = $L;
- $body = undef;
- $_ = undef;
- return ();
- } else {
- LOG ($verbose_filter, " keywords ($L bytes) in $url (ok)");
- }
+ my $L = length($_);
+ if ($L > 1000) {
+ LOG (($verbose_filter || $verbose_load),
+ "excessive keywords ($L bytes) in $url: rejecting.");
+ $rejected_urls{$url} = $L;
+ $body = undef;
+ $_ = undef;
+ return ();
+ } else {
+ LOG ($verbose_filter, " keywords ($L bytes) in $url (ok)");
}
- } elsif ( m/^(img|a) .*(src|href) ?= ?\"? ?(.*?)[ >\"]/io ) {
+ } elsif (m/^ (IMG|A) \b .* (SRC|HREF) \s* = \s* ["']? (.*?) [ "'<>] /six ||
+ m/^ (LINK|META) \b .* (REL|PROPERTY) \s* = \s*
+ ["']? (image_src|og:image) ["']? /six) {
- my $was_inline = (! ( "$1" eq "a" || "$1" eq "A" ));
+ my $was_inline = (lc($1) eq 'img');
+ my $was_meta = (lc($1) eq 'link' || lc($1) eq 'meta');
my $link = $3;
+
+ # For <link rel="image_src" href="...">
+ # and <meta property="og:image" content="...">
+ #
+ if ($was_meta) {
+ next unless (m/ (HREF|CONTENT) \s* = \s* ["']? (.*?) [ "'<>] /six);
+ $link = $2;
+ }
+
my ( $width ) = m/width ?=[ \"]*(\d+)/oi;
my ( $height ) = m/height ?=[ \"]*(\d+)/oi;
$_ = $link;
if ( m@^/@o ) {
my $site;
- ( $site = $base ) =~ s@^(http://[^/]*).*@$1@gio;
+ ( $site = $base ) =~ s@^(https?://[^/]*).*@$1@gio;
$_ = "$site$link";
} elsif ( ! m@^[^/:?]+:@ ) {
$_ = "$base$link";
}
# skip non-http
- if ( ! m@^http://@io ) {
+ if ( ! m@^https?://@io ) {
next;
}
# skip non-image
- if ( ! m@[.](gif|jpg|jpeg|pjpg|pjpeg)$@io ) {
+ if ( ! m@[.](gif|jpg|jpeg|pjpg|pjpeg|png)$@io ) {
next;
}
next;
}
+ # skip images with a URL that indicates a Yahoo thumbnail.
+ if (m@\.yimg\.com/.*/t/@) {
+ if (!$width) { $width = "?"; }
+ if (!$height) { $height = "?"; }
+ LOG ($verbose_filter, " skip yahoo thumb $_ (${width}x$height)");
+ next;
+ }
my $url = $_;
LOG ($verbose_filter,
" image $url" .
($width && $height ? " (${width}x${height})" : "") .
- ($was_inline ? " (inline)" : ""));
+ ($was_meta ? " (meta)" : $was_inline ? " (inline)" : ""));
- $urls[++$#urls] = $url;
- $unique_urls{$url}++;
- # jpegs are preferable to gifs.
- $_ = $url;
- if ( ! m@[.]gif$@io ) {
- $urls[++$#urls] = $url;
+ my $weight = 1;
+
+ if ($was_meta) {
+ $weight = 20; # meta tag images are far preferable to inline images.
+ } else {
+ if ($url !~ m@[.](gif|png)$@io ) {
+ $weight += 2; # JPEGs are preferable to GIFs and PNGs.
+ }
+ if (! $was_inline) {
+ $weight += 4; # pointers to images are preferable to inlined images.
+ }
}
- # pointers to images are preferable to inlined images.
- if ( ! $was_inline ) {
- $urls[++$#urls] = $url;
+ $unique_urls{$url}++;
+ for (my $i = 0; $i < $weight; $i++) {
$urls[++$#urls] = $url;
}
}
}
+# Given a URL and the RSS feed from that URL, pick a random image from
+# the feed. This is a lot simpler than extracting images out of a page:
+# we already know we have reasonable images, so we just pick one.
+# Returns: the real URL of the page (preferably not the RSS version),
+# and the image.
+
+sub pick_image_from_rss($$) {
+ my ( $url, $body ) = @_;
+ my @suitable = ($body =~ m/<enclosure url="(.*?)"/g);
+
+ my ($base) = ($body =~ m@<link>([^<>]+)</link>@i);
+ $base = $url unless $base;
+
+ # pick a random element of the table
+ if (@suitable) {
+ my $i = int(rand(scalar @suitable));
+ my $url = $suitable[$i];
+ LOG ($verbose_load, "picked image " .($i+1) . "/" .
+ ($#suitable+1) . ": $url");
+ return ($base, $url);
+ }
+ return;
+}
+
\f
############################################################################
#
############################################################################
-sub pick_dictionary {
+sub pick_dictionary() {
my @dicts = ("/usr/dict/words",
"/usr/share/dict/words",
- "/usr/share/lib/dict/words");
+ "/usr/share/lib/dict/words",
+ "/usr/share/dict/cracklib-small",
+ "/usr/share/dict/cracklib-words"
+ );
foreach my $f (@dicts) {
if (-f $f) {
$wordlist = $f;
# returns a random word from the dictionary
#
-sub random_word {
- my $word = 0;
- if (open (IN, "<$wordlist")) {
- my $size = (stat(IN))[7];
- my $pos = rand $size;
- if (seek (IN, $pos, 0)) {
- $word = <IN>; # toss partial line
- $word = <IN>; # keep next line
- }
- if (!$word) {
- seek( IN, 0, 0 );
- $word = <IN>;
- }
- close (IN);
- }
+sub random_word() {
- return 0 if (!$word);
+ return undef unless open (my $in, '<', $wordlist);
- $word =~ s/^[ \t\n\r]+//;
- $word =~ s/[ \t\n\r]+$//;
- $word =~ s/ys$/y/;
- $word =~ s/ally$//;
- $word =~ s/ly$//;
- $word =~ s/ies$/y/;
- $word =~ s/ally$/al/;
- $word =~ s/izes$/ize/;
- $word =~ tr/A-Z/a-z/;
+ my $size = (stat($in))[7];
+ my $word = undef;
+ my $count = 0;
- if ( $word =~ s/[ \t\n\r]/\+/g ) { # convert intra-word spaces to "+".
- $word = "\%22$word\%22"; # And put quotes (%22) around it.
+ while (1) {
+ error ("looping ($count) while reading $wordlist")
+ if (++$count > 100);
+
+ my $pos = int (rand ($size));
+ if (seek ($in, $pos, 0)) {
+ $word = <$in>; # toss partial line
+ $word = <$in>; # keep next line
}
- return $word;
+ next unless ($word);
+ next if ($word =~ m/^[-\']/);
+
+ $word = lc($word);
+ $word =~ s/^.*-//s;
+ $word =~ s/^[^a-z]+//s;
+ $word =~ s/[^a-z]+$//s;
+ $word =~ s/\'s$//s;
+ $word =~ s/ys$/y/s;
+ $word =~ s/ally$//s;
+ $word =~ s/ly$//s;
+ $word =~ s/ies$/y/s;
+ $word =~ s/ally$/al/s;
+ $word =~ s/izes$/ize/s;
+ $word =~ s/esses$/ess/s;
+ $word =~ s/(.{5})ing$/$1/s;
+
+ next if (length ($word) > 14);
+ last if ($word);
+ }
+
+ close ($in);
+
+ if ( $word =~ s/\s/\+/gs ) { # convert intra-word spaces to "+".
+ $word = "\%22$word\%22"; # And put quotes (%22) around it.
+ }
+
+ return $word;
}
-sub random_words {
- my ($or_p) = @_;
- my $sep = ($or_p ? "%20OR%20" : "%20");
- return (random_word . $sep .
- random_word . $sep .
- random_word . $sep .
- random_word . $sep .
- random_word);
+
+sub random_words($) {
+ my ($sep) = @_;
+ return (random_word() . $sep .
+ random_word() . $sep .
+ random_word() . $sep .
+ random_word() . $sep .
+ random_word());
}
-sub url_quote {
+sub url_quote($) {
my ($s) = @_;
$s =~ s|([^-a-zA-Z0-9.\@/_\r\n])|sprintf("%%%02X", ord($1))|ge;
return $s;
}
-sub url_unquote {
+sub url_unquote($) {
my ($s) = @_;
$s =~ s/[+]/ /g;
$s =~ s/%([a-z0-9]{2})/chr(hex($1))/ige;
return $s;
}
+sub html_quote($) {
+ my ($s) = @_;
+  $s =~ s/&/&amp;/gi;
+  $s =~ s/</&lt;/gi;
+  $s =~ s/>/&gt;/gi;
+  $s =~ s/\"/&quot;/gi;
+ return $s;
+}
+
+sub html_unquote($) {
+ my ($s) = @_;
+  $s =~ s/(&([a-z]+);)/{ $entity_table{$2} || $1; }/gexi;  # e.g., &apos;
+  $s =~ s/(&\#(\d+);)/{ chr($2) }/gexi;                    # e.g., &#39;
+ return $s;
+}
+
# Loads the given URL (a search on some search engine) and returns:
# - the total number of hits the search engine claimed it had;
# Note that this list contains all kinds of internal search engine
# junk URLs too -- caller must prune them.
#
-sub pick_from_search_engine {
+sub pick_from_search_engine($$$) {
my ( $timeout, $search_url, $words ) = @_;
$_ = $words;
my @subpages;
+ if ($body =~ m/^\{\"/s) { # Google AJAX JSON response.
+
+ my @chunks = split (/"GsearchResultClass"/, $body);
+ shift @chunks;
+ my $body2 = '';
+ my $n = 1;
+ foreach (@chunks) {
+ my ($img) = m/"unescapedUrl":"(.*?)"/si;
+ my ($url) = m/"originalContextUrl":"(.*?)"/si;
+ next unless ($img && $url);
+ $url = ("/imgres" .
+ "?imgurl=" . url_quote($img) .
+ "&imgrefurl=" . url_quote($url) .
+ "&...");
+ $body2 .= "<A HREF=\"" . html_quote($url) . "\">$n</A>\n";
+ $n++;
+ }
+ $body = $body2 if $body2;
+ }
+
my $search_count = "?";
if ($body =~ m@found (approximately |about )?(<B>)?(\d+)(</B>)? image@) {
$search_count = $3;
1 while ($search_count =~ s/^(\d+)(\d{3})/$1,$2/);
# if ($search_count eq "?" || $search_count eq "0") {
-# local *OUT;
# my $file = "/tmp/wc.html";
-# open(OUT, ">$file") || error ("writing $file: $!");
-# print OUT $body;
-# close OUT;
+# open (my $out, '>', $file) || error ("writing $file: $!");
+# print $out $body;
+# close $out;
# print STDERR blurb() . "###### wrote $file\n";
# }
my ($u) = m@<A\s.*\bHREF\s*=\s*([^>]+)>@i;
next unless $u;
- if ($u =~ m/^\"([^\"]*)\"/) { $u = $1; } # quoted string
- elsif ($u =~ m/^([^\s]*)\s/) { $u = $1; } # or token
+ if (m/\bm="\{(.*?)\}"/s) { # Bing info is inside JSON crud
+ my $json = html_unquote($1);
+ my ($href) = ($json =~ m/\bsurl:"(.*?)"/s);
+ my ($img) = ($json =~ m/\bimgurl:"(.*?)"/s);
+ $u = "$img\t$href" if ($img && $href);
+
+ } elsif ($u =~ m/^\"([^\"]*)\"/) { $u = $1 # quoted string
+ } elsif ($u =~ m/^([^\s]*)\s/) { $u = $1; # or token
+ }
if ( $rejected_urls{$u} ) {
LOG ($verbose_filter, " pre-rejecting candidate: $u");
}
-sub depoison {
+sub depoison(@) {
my (@urls) = @_;
my @urls2 = ();
foreach (@urls) {
- my ($h) = m@^http://([^/: \t\r\n]+)@i;
+ my ($h) = m@^https?://([^/: \t\r\n]+)@i;
next unless defined($h);
# given a list of URLs, picks one at random; loads it; and returns a
# random image from it.
-# returns the url of the page loaded; the url of the image chosen;
-# and a debugging description string.
+# returns the url of the page loaded; the url of the image chosen.
#
-sub pick_image_from_pages {
+sub pick_image_from_pages($$$$@) {
my ($base, $total_hit_count, $unfiltered_link_count, $timeout, @pages) = @_;
$total_hit_count = "?" unless defined($total_hit_count);
}
\f
-############################################################################
+#############################################################################
+##
+## Pick images from random pages returned by the Yahoo Random Link
+##
+#############################################################################
+#
+## yahoorand
+#my $yahoo_random_link = "http://random.yahoo.com/fast/ryl";
#
-# Pick images from random pages returned by the Yahoo Random Link
#
-############################################################################
-
-# yahoorand
-my $yahoo_random_link = "http://random.yahoo.com/fast/ryl";
-
-
# Picks a random page; picks a random image on that page;
# returns two URLs: the page containing the image, and the image.
# Returns () if nothing found this time.
#
-sub pick_from_yahoo_random_link {
- my ( $timeout ) = @_;
-
- print STDERR "\n\n" if ($verbose_load);
- LOG ($verbose_load, "URL: $yahoo_random_link");
-
- $last_search = $yahoo_random_link; # for warnings
-
- $suppress_audit = 1;
-
- my ( $base, $body ) = get_document ($yahoo_random_link, undef, $timeout);
- if (!$base || !$body) {
- $body = undef;
- return;
- }
-
- LOG ($verbose_load, "redirected to: $base");
-
- my $img = pick_image_from_body ($base, $body);
- $body = undef;
-
- if ($img) {
- return ($base, $img);
- } else {
- return ();
- }
-}
+#sub pick_from_yahoo_random_link($) {
+# my ($timeout) = @_;
+#
+# print STDERR "\n\n" if ($verbose_load);
+# LOG ($verbose_load, "URL: $yahoo_random_link");
+#
+# $last_search = $yahoo_random_link; # for warnings
+#
+# $suppress_audit = 1;
+#
+# my ( $base, $body ) = get_document ($yahoo_random_link, undef, $timeout);
+# if (!$base || !$body) {
+# $body = undef;
+# return;
+# }
+#
+# LOG ($verbose_load, "redirected to: $base");
+#
+# my $img = pick_image_from_body ($base, $body);
+# $body = undef;
+#
+# if ($img) {
+# return ($base, $img);
+# } else {
+# return ();
+# }
+#}
\f
############################################################################
#
# Pick images from random pages returned by the Alta Vista Random Link
+# Note: this seems to have gotten a *lot* less random lately (2007).
#
############################################################################
# returns two URLs: the page containing the image, and the image.
# Returns () if nothing found this time.
#
-sub pick_from_alta_vista_random_link {
- my ( $timeout ) = @_;
+sub pick_from_alta_vista_random_link($) {
+ my ($timeout) = @_;
print STDERR "\n\n" if ($verbose_load);
LOG ($verbose_load, "URL: $alta_vista_random_link");
"&q=";
# avimages
-sub pick_from_alta_vista_images {
- my ( $timeout ) = @_;
+sub pick_from_alta_vista_images($) {
+ my ($timeout) = @_;
- my $words = random_words(0);
+ my $words = random_word();
my $page = (int(rand(9)) + 1);
my $search_url = $alta_vista_images_url . $words;
my @candidates = ();
foreach my $u (@subpages) {
- # avtext is encoding their URLs now.
- next unless ($u =~ m@^/r.*\&r=([^&]+).*@);
- $u = url_unquote($1);
+ # avimages is encoding their URLs now.
+ next unless ($u =~ s/^.*\*\*(http%3a.*$)/$1/gsi);
+ $u = url_unquote($u);
- next unless ($u =~ m@^http://@i); # skip non-HTTP or relative URLs
+ next unless ($u =~ m@^https?://@i); # skip non-HTTP or relative URLs
next if ($u =~ m@[/.]altavista\.com\b@i); # skip altavista builtins
+ next if ($u =~ m@[/.]yahoo\.com\b@i); # yahoo and av in cahoots?
next if ($u =~ m@[/.]doubleclick\.net\b@i); # you cretins
next if ($u =~ m@[/.]clicktomarket\.com\b@i); # more cretins
\f
############################################################################
#
-# Pick images by feeding random words into Google Image Search.
-# By Charles Gales <gales@us.ibm.com>
+# Pick images from Aptix security cameras
+# Cribbed liberally from google image search code.
+# By Jason Sullivan <jasonsul@us.ibm.com>
#
############################################################################
+my $aptix_images_url = ("http://www.google.com/search" .
+ "?q=inurl:%22jpg/image.jpg%3Fr%3D%22");
-my $google_images_url = "http://images.google.com/images" .
- "?site=images" . # photos
- "&btnG=Search" . # graphics
- "&safe=off" . # no screening
- "&imgsafe=off" .
- "&q=";
-
-# googleimgs
-sub pick_from_google_images {
- my ( $timeout ) = @_;
+# securitycam
+sub pick_from_security_camera($) {
+ my ($timeout) = @_;
- my $words = random_word; # only one word for Google
my $page = (int(rand(9)) + 1);
- my $num = 20; # 20 images per page
- my $search_url = $google_images_url . $words;
+ my $num = 20; # 20 images per page
+ my $search_url = $aptix_images_url;
if ($page > 1) {
$search_url .= "&start=" . $page*$num; # page number
- $search_url .= "&num=" . $num; #images per page
+ $search_url .= "&num=" . $num; #images per page
}
my ($search_hit_count, @subpages) =
- pick_from_search_engine ($timeout, $search_url, $words);
+ pick_from_search_engine ($timeout, $search_url, '');
my @candidates = ();
+ my %referers;
foreach my $u (@subpages) {
- next unless ($u =~ m@imgres\?imgurl@i); # All pics start with this
- next if ($u =~ m@[/.]google\.com\b@i); # skip google builtins
+ next if ($u =~ m@[/.]google\.com\b@i); # skip google builtins (most links)
+ next unless ($u =~ m@jpg/image.jpg\?r=@i); # All pics contain this
- if ($u =~ m@^/imgres\?imgurl=(.*?)\&imgrefurl=(.*?)\&@) {
- my $urlf = $2;
- LOG ($verbose_filter, " candidate: $urlf");
- push @candidates, $urlf;
+ LOG ($verbose_filter, " candidate: $u");
+ push @candidates, $u;
+ $referers{$u} = $u;
}
- }
- return pick_image_from_pages ($search_url, $search_hit_count, $#subpages+1,
- $timeout, @candidates);
-}
+ @candidates = depoison (@candidates);
+ return () if ($#candidates < 0);
+ my $i = int(rand($#candidates+1));
+ my $img = $candidates[$i];
+ my $ref = $referers{$img};
+ LOG ($verbose_load, "picked image " . ($i+1) . ": $img (on $ref)");
+ return ($ref, $img);
+}
\f
############################################################################
#
-# Pick images by feeding random *numbers* into Google Image Search.
-# By jwz, suggested by from Ian O'Donnell.
+# Pick images by feeding random words into Google Image Search.
+# By Charles Gales <gales@us.ibm.com>
#
############################################################################
-# googlenums
-sub pick_from_google_image_numbers {
- my ( $timeout ) = @_;
-
- my $max = 9999;
- my $number = int(rand($max));
-
- $number = sprintf("%04d", $number)
- if (rand() < 0.3);
+my $google_images_url = "http://ajax.googleapis.com/ajax/services/" .
+ "search/images" .
+ "?v=1.0" .
+ "&rsz=large" .
+ "&q=";
- my $words = "$number";
- my $page = (int(rand(40)) + 1);
- my $num = 20; # 20 images per page
- my $search_url = $google_images_url . $words;
+# googleimgs
+sub pick_from_google_images($;$$) {
+ my ($timeout, $words, $max_page) = @_;
- if ($page > 1) {
- $search_url .= "&start=" . $page*$num; # page number
- $search_url .= "&num=" . $num; #images per page
+ if (!defined($words)) {
+ $words = random_word(); # only one word for Google
}
+ my $off = int(rand(40));
+ my $search_url = $google_images_url . $words . "&start=" . $off;
+
my ($search_hit_count, @subpages) =
pick_from_search_engine ($timeout, $search_url, $words);
next unless ($u =~ m@imgres\?imgurl@i); # All pics start with this
next if ($u =~ m@[/.]google\.com\b@i); # skip google builtins
- if ($u =~ m@^/imgres\?imgurl=(.*?)\&imgrefurl=(.*?)\&@) {
+ $u = html_unquote($u);
+ if ($u =~ m@^/imgres\?imgurl=(.*?)&imgrefurl=(.*?)\&@) {
my $ref = $2;
- my $img = "http://$1";
+ my $img = $1;
+ $ref = url_decode($ref);
+ $img = url_decode($img);
+
+ $img = "http://$img" unless ($img =~ m/^https?:/i);
LOG ($verbose_filter, " candidate: $ref");
push @candidates, $img;
\f
############################################################################
#
-# Pick images by feeding random words into Alta Vista Text Search
+# Pick images by feeding random numbers into Google Image Search.
+# By jwz, suggested by Ian O'Donnell.
#
############################################################################
-my $alta_vista_url = "http://www.altavista.com/web/results" .
- "?pg=aq" .
- "&aqmode=s" .
- "&filetype=html" .
- "&sc=on" . # "site collapse"
- "&nbq=50" .
- "&aqo=";
-
-# avtext
-sub pick_from_alta_vista_text {
- my ( $timeout ) = @_;
-
- my $words = random_words(0);
- my $page = (int(rand(9)) + 1);
- my $search_url = $alta_vista_url . $words;
-
- if ($page > 1) {
- $search_url .= "&pgno=" . $page;
- $search_url .= "&stq=" . (($page-1) * 10);
- }
-
- my ($search_hit_count, @subpages) =
- pick_from_search_engine ($timeout, $search_url, $words);
-
- my @candidates = ();
- foreach my $u (@subpages) {
+# googlenums
+sub pick_from_google_image_numbers($) {
+ my ($timeout) = @_;
- # Those altavista fuckers are playing really nasty redirection games
- # these days: the filter your clicks through their site, but use
- # onMouseOver to make it look like they're not! Well, it makes it
- # easier for us to identify search results...
- #
- next unless ($u =~ m@^/r.*\&r=([^&]+).*@);
- $u = url_unquote($1);
+ my $max = 9999;
+ my $number = int(rand($max));
- LOG ($verbose_filter, " candidate: $u");
- push @candidates, $u;
- }
+ $number = sprintf("%04d", $number)
+ if (rand() < 0.3);
- return pick_image_from_pages ($search_url, $search_hit_count, $#subpages+1,
- $timeout, @candidates);
+ pick_from_google_images ($timeout, "$number");
+}
+
+
+\f
+############################################################################
+#
+# Pick images by feeding random digital camera file names into
+# Google Image Search.
+# By jwz, inspired by the excellent Random Personal Picture Finder
+# at http://www.diddly.com/random/
+#
+############################################################################
+
+my @photomakers = (
+ #
+ # Common digital camera file name formats, as described at
+ # http://www.diddly.com/random/about.html
+ #
+ sub { sprintf ("dcp%05d.jpg", int(rand(4000))); }, # Kodak
+ sub { sprintf ("dsc%05d.jpg", int(rand(4000))); }, # Nikon
+ sub { sprintf ("dscn%04d.jpg", int(rand(4000))); }, # Nikon
+ sub { sprintf ("mvc-%03d.jpg", int(rand(999))); }, # Sony Mavica
+ sub { sprintf ("mvc%05d.jpg", int(rand(9999))); }, # Sony Mavica
+ sub { sprintf ("P101%04d.jpg", int(rand(9999))); }, # Olympus w/ date=101
+ sub { sprintf ("P%x%02d%04d.jpg", # Olympus
+ int(rand(0xC)), int(rand(30))+1,
+ rand(9999)); },
+ sub { sprintf ("IMG_%03d.jpg", int(rand(999))); }, # ?
+ sub { sprintf ("IMAG%04d.jpg", int(rand(9999))); }, # RCA and Samsung
+ sub { my $n = int(rand(9999)); # Canon
+ sprintf ("1%02d-%04d.jpg", int($n/100), $n); },
+ sub { my $n = int(rand(9999)); # Canon
+ sprintf ("1%02d-%04d_IMG.jpg",
+ int($n/100), $n); },
+ sub { sprintf ("IMG_%04d.jpg", int(rand(9999))); }, # Canon
+ sub { sprintf ("dscf%04d.jpg", int(rand(9999))); }, # Fuji Finepix
+ sub { sprintf ("pdrm%04d.jpg", int(rand(9999))); }, # Toshiba PDR
+ sub { sprintf ("IM%06d.jpg", int(rand(9999))); }, # HP Photosmart
+ sub { sprintf ("EX%06d.jpg", int(rand(9999))); }, # HP Photosmart
+# sub { my $n = int(rand(3)); # Kodak DC-40,50,120
+# sprintf ("DC%04d%s.jpg", int(rand(9999)),
+# $n == 0 ? 'S' : $n == 1 ? 'M' : 'L'); },
+ sub { sprintf ("pict%04d.jpg", int(rand(9999))); }, # Minolta Dimage
+ sub { sprintf ("P%07d.jpg", int(rand(9999))); }, # Kodak DC290
+# sub { sprintf ("%02d%02d%04d.jpg", # Casio QV3000, QV4000
+# int(rand(12))+1, int(rand(31))+1,
+# int(rand(999))); },
+# sub { sprintf ("%02d%x%02d%04d.jpg", # Casio QV7000
+# int(rand(6)), # year
+# int(rand(12))+1, int(rand(31))+1,
+# int(rand(999))); },
+ sub { sprintf ("IMGP%04d.jpg", int(rand(9999))); }, # Pentax Optio S
+ sub { sprintf ("PANA%04d.jpg", int(rand(9999))); }, # Panasonic vid still
+ sub { sprintf ("HPIM%04d.jpg", int(rand(9999))); }, # HP Photosmart
+ sub { sprintf ("PCDV%04d.jpg", int(rand(9999))); }, # ?
+ );
+
+
+# googlephotos
+sub pick_from_google_image_photos($) {
+ my ($timeout) = @_;
+
+ my $i = int(rand($#photomakers + 1));
+ my $fn = $photomakers[$i];
+ my $file = &$fn;
+ #$file .= "%20filetype:jpg";
+
+ pick_from_google_images ($timeout, $file);
+}
+
+\f
+############################################################################
+#
+# Pick images by feeding random words into Bing Image Search.
+# By the way: fuck Microsoft.
+#
+############################################################################
+
+my $bing_images_url = "http://www.bing.com/images/async?q=";
+
+
+# bingimgs
+sub pick_from_bing_images($;$$) {
+ my ($timeout, $words, $max_page) = @_;
+
+ if (!defined($words)) {
+ $words = random_word(); # only one word for Bing
+ }
+
+ my $off = int(rand(300));
+ my $search_url = $bing_images_url . $words . "&first=" . $off;
+
+ my ($search_hit_count, @subpages) =
+ pick_from_search_engine ($timeout, $search_url, $words);
+
+ my @candidates = ();
+ my %referers;
+ foreach my $u (@subpages) {
+ my ($img, $ref) = ($u =~ m/^(.*?)\t(.*)$/s);
+ next unless $img;
+ LOG ($verbose_filter, " candidate: $ref");
+ push @candidates, $img;
+ $referers{$img} = $ref;
+ }
+
+ @candidates = depoison (@candidates);
+ return () if ($#candidates < 0);
+ my $i = int(rand($#candidates+1));
+ my $img = $candidates[$i];
+ my $ref = $referers{$img};
+
+ LOG ($verbose_load, "picked image " . ($i+1) . ": $img (on $ref)");
+ return ($ref, $img);
+}
+
+
+
+\f
+############################################################################
+#
+# Pick images by feeding random numbers into Bing Image Search.
+#
+############################################################################
+
+# bingnums
+sub pick_from_bing_image_numbers($) {
+ my ($timeout) = @_;
+
+ my $max = 9999;
+ my $number = int(rand($max));
+
+ $number = sprintf("%04d", $number)
+ if (rand() < 0.3);
+
+ pick_from_bing_images ($timeout, "$number");
+}
+
+\f
+############################################################################
+#
+# Pick images by feeding random digital camera file names into Bing Image Search.
+#
+############################################################################
+
+# bingphotos
+sub pick_from_bing_image_photos($) {
+ my ($timeout) = @_;
+
+ my $i = int(rand($#photomakers + 1));
+ my $fn = $photomakers[$i];
+ my $file = &$fn;
+
+ pick_from_bing_images ($timeout, $file);
+}
+
+\f
+############################################################################
+#
+# Pick images by feeding random words into Alta Vista Text Search
+#
+############################################################################
+
+
+my $alta_vista_url = "http://www.altavista.com/web/results" .
+ "?pg=aq" .
+ "&aqmode=s" .
+ "&filetype=html" .
+ "&sc=on" . # "site collapse"
+ "&nbq=50" .
+ "&aqo=";
+
+# avtext
+sub pick_from_alta_vista_text($) {
+ my ($timeout) = @_;
+
+ my $words = random_words('%20');
+ my $page = (int(rand(9)) + 1);
+ my $search_url = $alta_vista_url . $words;
+
+ if ($page > 1) {
+ $search_url .= "&pgno=" . $page;
+ $search_url .= "&stq=" . (($page-1) * 10);
+ }
+
+ my ($search_hit_count, @subpages) =
+ pick_from_search_engine ($timeout, $search_url, $words);
+
+ my @candidates = ();
+ foreach my $u (@subpages) {
+
+ # Those altavista fuckers are playing really nasty redirection games
+ # these days: they filter your clicks through their site, but use
+ # onMouseOver to make it look like they're not! Well, it makes it
+ # easier for us to identify search results...
+ #
+ next unless ($u =~ s/^.*\*\*(http%3a.*$)/$1/gsi);
+ $u = url_unquote($u);
+
+ next unless ($u =~ m@^https?://@i); # skip non-HTTP or relative URLs
+ next if ($u =~ m@[/.]altavista\.com\b@i); # skip altavista builtins
+ next if ($u =~ m@[/.]yahoo\.com\b@i); # yahoo and av in cahoots?
+
+ LOG ($verbose_filter, " candidate: $u");
+ push @candidates, $u;
+ }
+
+ return pick_image_from_pages ($search_url, $search_hit_count, $#subpages+1,
+ $timeout, @candidates);
}
#
############################################################################
-my $hotbot_search_url =("http://hotbot.lycos.com/default.asp" .
- "?ca=w" .
- "&descriptiontype=0" .
- "&imagetoggle=1" .
- "&matchmode=any" .
- "&nummod=2" .
- "&recordcount=50" .
- "&sitegroup=1" .
- "&stem=1" .
- "&cobrand=undefined" .
- "&query=");
+my $hotbot_search_url =("http://hotbot.lycos.com/default.asp" .
+ "?ca=w" .
+ "&descriptiontype=0" .
+ "&imagetoggle=1" .
+ "&matchmode=any" .
+ "&nummod=2" .
+ "&recordcount=50" .
+ "&sitegroup=1" .
+ "&stem=1" .
+ "&cobrand=undefined" .
+ "&query=");
+
+sub pick_from_hotbot_text($) {
+ my ($timeout) = @_;
+
+ $last_search = $hotbot_search_url; # for warnings
+
+ # lycos seems to always give us back dictionaries and word lists if
+ # we search for more than one word...
+ #
+ my $words = random_word();
+
+ my $start = int(rand(8)) * 10 + 1;
+ my $search_url = $hotbot_search_url . $words . "&first=$start&page=more";
+
+ my ($search_hit_count, @subpages) =
+ pick_from_search_engine ($timeout, $search_url, $words);
+
+ my @candidates = ();
+ foreach my $u (@subpages) {
+
+ # Hotbot plays redirection games too
+ # (not any more?)
+# next unless ($u =~ m@/director.asp\?.*\btarget=([^&]+)@);
+# $u = url_decode($1);
+
+ next unless ($u =~ m@^https?://@i); # skip non-HTTP or relative URLs
+ next if ($u =~ m@[/.]hotbot\.com\b@i); # skip hotbot builtins
+ next if ($u =~ m@[/.]lycos\.com\b@i); # skip hotbot builtins
+ next if ($u =~ m@[/.]inktomi\.com\b@i); # skip hotbot builtins
+
+ LOG ($verbose_filter, " candidate: $u");
+ push @candidates, $u;
+ }
+
+ return pick_image_from_pages ($search_url, $search_hit_count, $#subpages+1,
+ $timeout, @candidates);
+}
+
+
+\f
+############################################################################
+#
+# Pick images by feeding random words into Lycos
+#
+############################################################################
+
+my $lycos_search_url = "http://search.lycos.com/default.asp" .
+ "?lpv=1" .
+ "&loc=searchhp" .
+ "&tab=web" .
+ "&query=";
+
+sub pick_from_lycos_text($) {
+ my ($timeout) = @_;
+
+ $last_search = $lycos_search_url; # for warnings
+
+ # lycos seems to always give us back dictionaries and word lists if
+ # we search for more than one word...
+ #
+ my $words = random_word();
+
+ my $start = int(rand(8)) * 10 + 1;
+ my $search_url = $lycos_search_url . $words . "&first=$start&page=more";
+
+ my ($search_hit_count, @subpages) =
+ pick_from_search_engine ($timeout, $search_url, $words);
+
+ my @candidates = ();
+ foreach my $u (@subpages) {
+
+ # Lycos plays redirection games.
+ # (not any more?)
+# next unless ($u =~ m@^https?://click.lycos.com/director.asp
+# .*
+# \btarget=([^&]+)
+# .*
+# @x);
+# $u = url_decode($1);
+
+ next unless ($u =~ m@^https?://@i); # skip non-HTTP or relative URLs
+ next if ($u =~ m@[/.]hotbot\.com\b@i); # skip lycos builtins
+ next if ($u =~ m@[/.]lycos\.com\b@i); # skip lycos builtins
+ next if ($u =~ m@[/.]terralycos\.com\b@i); # skip lycos builtins
+ next if ($u =~ m@[/.]inktomi\.com\b@i); # skip lycos builtins
+
+
+ LOG ($verbose_filter, " candidate: $u");
+ push @candidates, $u;
+ }
+
+ return pick_image_from_pages ($search_url, $search_hit_count, $#subpages+1,
+ $timeout, @candidates);
+}
+
+
+\f
+############################################################################
+#
+# Pick images by feeding random words into news.yahoo.com
+#
+############################################################################
+
+my $yahoo_news_url = "http://news.search.yahoo.com/search/news" .
+ "?c=news_photos" .
+ "&p=";
+
+# yahoonews
+sub pick_from_yahoo_news_text($) {
+ my ($timeout) = @_;
+
+ $last_search = $yahoo_news_url; # for warnings
+
+ my $words = random_word();
+ my $search_url = $yahoo_news_url . $words;
+
+ my ($search_hit_count, @subpages) =
+ pick_from_search_engine ($timeout, $search_url, $words);
+
+ my @candidates = ();
+ foreach my $u (@subpages) {
+
+ # de-redirectize the URLs
+ $u =~ s@^https?://rds\.yahoo\.com/.*-http%3A@http:@s;
+
+ # only accept URLs on Yahoo's news site
+ next unless ($u =~ m@^https?://dailynews\.yahoo\.com/@i ||
+ $u =~ m@^https?://story\.news\.yahoo\.com/@i);
+ next unless ($u =~ m@&u=/@);
+
+ LOG ($verbose_filter, " candidate: $u");
+ push @candidates, $u;
+ }
+
+ return pick_image_from_pages ($search_url, $search_hit_count, $#subpages+1,
+ $timeout, @candidates);
+}
+
+
+\f
+############################################################################
+#
+# Pick images from LiveJournal's list of recently-posted images.
+#
+############################################################################
+
+my $livejournal_img_url = "http://www.livejournal.com/stats/latest-img.bml";
+
+# With most of our image sources, we get a random page and then select
+# from the images on it. However, in the case of LiveJournal, the page
+# of images tends to update slowly; so we'll remember the last N entries
+# on it and randomly select from those, to get a wider variety each time.
+
+my $lj_cache_size = 1000;
+my @lj_cache = (); # fifo, for ordering by age
+my %lj_cache = (); # hash, for detecting dups
+
+# livejournal
+sub pick_from_livejournal_images($) {
+ my ($timeout) = @_;
+
+ $last_search = $livejournal_img_url; # for warnings
+
+ my ( $base, $body ) = get_document ($livejournal_img_url, undef, $timeout);
+
+ # Often the document comes back empty. If so, just use the cache.
+ # return () unless $body;
+ $body = '' unless defined($body);
+
+ $body =~ s/\n/ /gs;
+ $body =~ s/(<recent-image)\b/\n$1/gsi;
+
+ foreach (split (/\n/, $body)) {
+ next unless (m/^<recent-image\b/);
+ next unless (m/\bIMG=[\'\"]([^\'\"]+)[\'\"]/si);
+ my $img = html_unquote ($1);
+
+ next if ($lj_cache{$img}); # already have it
+
+ next unless (m/\bURL=[\'\"]([^\'\"]+)[\'\"]/si);
+ my $page = html_unquote ($1);
+ my @pair = ($img, $page);
+ LOG ($verbose_filter, " candidate: $img");
+ push @lj_cache, \@pair;
+ $lj_cache{$img} = \@pair;
+ }
+
+ return () if ($#lj_cache == -1);
+
+ my $n = $#lj_cache+1;
+ my $i = int(rand($n));
+ my ($img, $page) = @{$lj_cache[$i]};
+
+ # delete this one from @lj_cache and from %lj_cache.
+ #
+ @lj_cache = ( @lj_cache[0 .. $i-1],
+ @lj_cache[$i+1 .. $#lj_cache] );
+ delete $lj_cache{$img};
+
+ # Keep the size of the cache under the limit by nuking older entries
+ #
+ while ($#lj_cache >= $lj_cache_size) {
+ my $pairP = shift @lj_cache;
+ my $img = $pairP->[0];
+ delete $lj_cache{$img};
+ }
+
+ LOG ($verbose_load, "picked image " .($i+1) . "/$n: $img");
+
+ return ($page, $img);
+}
+
+\f
+############################################################################
+#
+# Pick images from ircimages.com (images that have been in the /topic of
+# various IRC channels.)
+#
+############################################################################
+
+my $ircimages_url = "http://ircimages.com/";
+
+# ircimages
+sub pick_from_ircimages($) {
+ my ($timeout) = @_;
+
+ $last_search = $ircimages_url; # for warnings
+
+ my $n = int(rand(2900));
+ my $search_url = $ircimages_url . "page-$n";
+
+ my ( $base, $body ) = get_document ($search_url, undef, $timeout);
+ return () unless $body;
+
+ my @candidates = ();
+
+ $body =~ s/\n/ /gs;
+ $body =~ s/(<A)\b/\n$1/gsi;
+
+ foreach (split (/\n/, $body)) {
+
+ my ($u) = m@<A\s.*\bHREF\s*=\s*([^>]+)>@i;
+ next unless $u;
+
+ if ($u =~ m/^\"([^\"]*)\"/) { $u = $1; } # quoted string
+ elsif ($u =~ m/^([^\s]*)\s/) { $u = $1; } # or token
+
+ next unless ($u =~ m/^https?:/i);
+ next if ($u =~ m@^https?://(searchirc\.com\|ircimages\.com)@i);
+ next unless ($u =~ m@[.](gif|jpg|jpeg|pjpg|pjpeg|png)$@i);
+
+ LOG ($verbose_http, " HREF: $u");
+ push @candidates, $u;
+ }
+
+ LOG ($verbose_filter, "" . $#candidates+1 . " links on $search_url");
+
+ return () if ($#candidates == -1);
+
+ my $i = int(rand($#candidates+1));
+ my $img = $candidates[$i];
+
+ LOG ($verbose_load, "picked image " .($i+1) . "/" . ($#candidates+1) .
+ ": $img");
+
+ $search_url = $img; # hmm...
+ return ($search_url, $img);
+}
+
+\f
+############################################################################
+#
+# Pick images from Twitpic's list of recently-posted images.
+#
+############################################################################
+
+my $twitpic_img_url = "http://twitpic.com/public_timeline/feed.rss";
+
+# With most of our image sources, we get a random page and then select
+# from the images on it. However, in the case of Twitpic, the page
+# of images tends to update slowly; so we'll remember the last N entries
+# on it and randomly select from those, to get a wider variety each time.
+
+my $twitpic_cache_size = 1000;
+my @twitpic_cache = (); # fifo, for ordering by age
+my %twitpic_cache = (); # hash, for detecting dups
+
+# twitpic
+sub pick_from_twitpic_images($) {
+ my ($timeout) = @_;
+
+ $last_search = $twitpic_img_url; # for warnings
+
+ my ( $base, $body ) = get_document ($twitpic_img_url, undef, $timeout);
+
+ # Update the cache.
+
+ if ($body) {
+ $body =~ s/\n/ /gs;
+ $body =~ s/(<item)\b/\n$1/gsi;
+
+ my @items = split (/\n/, $body);
+ shift @items;
+ foreach (@items) {
+ next unless (m@<link>([^<>]*)</link>@si);
+ my $page = html_unquote ($1);
+
+ $page =~ s@/$@@s;
+ $page .= '/full';
+
+ next if ($twitpic_cache{$page}); # already have it
+
+ LOG ($verbose_filter, " candidate: $page");
+ push @twitpic_cache, $page;
+ $twitpic_cache{$page} = $page;
+ }
+ }
+
+ # Pull from the cache.
+
+ return () if ($#twitpic_cache == -1);
+
+ my $n = $#twitpic_cache+1;
+ my $i = int(rand($n));
+ my $page = $twitpic_cache[$i];
+
+ # delete this one from @twitpic_cache and from %twitpic_cache.
+ #
+ @twitpic_cache = ( @twitpic_cache[0 .. $i-1],
+ @twitpic_cache[$i+1 .. $#twitpic_cache] );
+ delete $twitpic_cache{$page};
+
+ # Keep the size of the cache under the limit by nuking older entries
+ #
+ while ($#twitpic_cache >= $twitpic_cache_size) {
+ my $page = shift @twitpic_cache;
+ delete $twitpic_cache{$page};
+ }
+
+ ( $base, $body ) = get_document ($page, undef, $timeout);
+ my $img = undef;
+ $body = '' unless defined($body);
+
+ foreach (split (/<img\s+/, $body)) {
+ my ($src) = m/\bsrc=[\"\'](.*?)[\"\']/si;
+ next unless $src;
+ next if m@/js/@s;
+ next if m@/images/@s;
+
+ $img = $src;
+
+ $img = "http:$img" if ($img =~ m@^//@s); # Oh come on
+
+ # Sometimes these images are hosted on twitpic, sometimes on Amazon.
+ if ($img =~ m@^/@) {
+ $base =~ s@^(https?://[^/]+)/.*@$1@s;
+ $img = $base . $img;
+ }
+ last;
+ }
+
+ if (!$img) {
+ LOG ($verbose_load, "no matching images on $page\n");
+ return ();
+ }
+
+ LOG ($verbose_load, "picked image " .($i+1) . "/$n: $img");
+
+ return ($page, $img);
+}
+
+\f
+############################################################################
+#
+# Pick images from Twitter's list of recently-posted updates.
+#
+############################################################################
+
+# With most of our image sources, we get a random page and then select
+# from the images on it. However, in the case of Twitter, the page
+# of images only updates once a minute; so we'll remember the last N entries
+# on it and randomly select from those, to get a wider variety each time.
+
+my $twitter_img_url = "http://api.twitter.com/1/statuses/" .
+ "public_timeline.json" .
+ "?include_entities=true" .
+ "&include_rts=true" .
+ "&count=200";
+
+my $twitter_cache_size = 1000;
+
+my @twitter_cache = (); # fifo, for ordering by age
+my %twitter_cache = (); # hash, for detecting dups
+
+
+# twitter
+sub pick_from_twitter_images($) {
+ my ($timeout) = @_;
+
+ $last_search = $twitter_img_url; # for warnings
+
+ my ( $base, $body ) = get_document ($twitter_img_url, undef, $timeout);
+ # Update the cache.
+
+ if ($body) {
+ $body =~ s/[\r\n]+/ /gs;
+
+ # Parsing JSON is a pain in the ass. So we halfass it as usual.
+ $body =~ s/^\[|\]$//s;
+ $body =~ s/(\[.*?\])/{ $_ = $1; s@\},@\} @gs; $_; }/gsexi;
+ my @items = split (/\},\{/, $body);
+ foreach (@items) {
+ my ($name) = m@"screen_name":"([^\"]+)"@si;
+ my ($img) = m@"media_url":"([^\"]+)"@si;
+ my ($page) = m@"display_url":"([^\"]+)"@si;
+ next unless ($name && $img && $page);
+ foreach ($img, $page) {
+ s/\\//gs;
+ $_ = "http://$_" unless (m/^http/si);
+ }
+
+ next if ($twitter_cache{$page}); # already have it
+
+ LOG ($verbose_filter, " candidate: $page - $img");
+ push @twitter_cache, $page;
+ $twitter_cache{$page} = $img;
+ }
+ }
+
+ # Pull from the cache.
+
+ return () if ($#twitter_cache == -1);
+
+ my $n = $#twitter_cache+1;
+ my $i = int(rand($n));
+ my $page = $twitter_cache[$i];
+ my $url = $twitter_cache{$page};
+
+ # delete this one from @twitter_cache and from %twitter_cache.
+ #
+ @twitter_cache = ( @twitter_cache[0 .. $i-1],
+ @twitter_cache[$i+1 .. $#twitter_cache] );
+ delete $twitter_cache{$page};
+
+ # Keep the size of the cache under the limit by nuking older entries
+ #
+ while ($#twitter_cache >= $twitter_cache_size) {
+ my $page = shift @twitter_cache;
+ delete $twitter_cache{$page};
+ }
+
+ LOG ($verbose_load, "picked page $url");
+
+ $suppress_audit = 1;
+
+ return ($page, $url);
+}
+
+\f
+############################################################################
+#
+# Pick images from Flickr's page of recently-posted photos.
+#
+############################################################################
+
+my $flickr_img_url = "http://www.flickr.com/explore/";
+
+# Like LiveJournal, the Flickr page of images tends to update slowly,
+# so remember the last N entries on it and randomly select from those.
+
+# I know that Flickr has an API (http://www.flickr.com/services/api/)
+# but it was easy enough to scrape the HTML, so I didn't bother exploring.
+
+my $flickr_cache_size = 1000;
+my @flickr_cache = (); # fifo, for ordering by age
+my %flickr_cache = (); # hash, for detecting dups
+
+
+# flickr_recent
+sub pick_from_flickr_recent($) {
+ my ($timeout) = @_;
+
+ my $start = 16 * int(rand(100));
+
+ $last_search = $flickr_img_url; # for warnings
+ $last_search .= "?start=$start" if ($start > 0);
+
+ my ( $base, $body ) = get_document ($last_search, undef, $timeout);
+
+ # If the document comes back empty, just use the cache.
+ # return () unless $body;
+ $body = '' unless defined($body);
+
+ my $count = 0;
+ my $count2 = 0;
+
+ if ($body =~ m@{ *"_data": \[ ( .*? \} ) \]@six) {
+ $body = $1;
+ } else {
+ LOG ($verbose_load, "flickr unparsable: $last_search");
+ return ();
+ }
+
+ $body =~ s/[\r\n]/ /gs;
+ $body =~ s/(\},) *(\{)/$1\n$2/gs; # "_flickrModelRegistry"
-sub pick_from_hotbot_text {
- my ( $timeout ) = @_;
+ foreach my $chunk (split (/\n/, $body)) {
+ my ($img) = ($chunk =~ m@"displayUrl": *"(.*?)"@six);
+ next unless defined ($img);
+ $img =~ s/\\//gs;
+ $img = "//" unless ($img =~ m@^/@s);
+ $img = "http:$img" unless ($img =~ m/^http/s);
- # lycos seems to always give us back dictionaries and word lists if
- # we search for more than one word...
- #
- my $words = random_word();
+ my ($user) = ($chunk =~ m/"pathAlias": *"(.*?)"/si);
+ next unless defined ($user);
- my $start = int(rand(8)) * 10 + 1;
- my $search_url = $hotbot_search_url . $words . "&first=$start&page=more";
+ my ($id) = ($img =~ m@/\d+/(\d+)_([\da-f]+)_@si);
+ my ($page) = "https://www.flickr.com/photos/$user/$id/";
- my ($search_hit_count, @subpages) =
- pick_from_search_engine ($timeout, $search_url, $words);
+ # $img =~ s/_[a-z](\.[a-z\d]+)$/$1/si; # take off "thumb" suffix
- my @candidates = ();
- foreach my $u (@subpages) {
+ $count++;
+ next if ($flickr_cache{$img}); # already have it
- # Hotbot plays redirection games too
- next unless ($u =~ m@/director.asp\?.*\btarget=([^&]+)@);
- $u = url_decode($1);
+ my @pair = ($img, $page, $start);
+ LOG ($verbose_filter, " candidate: $img");
+ push @flickr_cache, \@pair;
+ $flickr_cache{$img} = \@pair;
+ $count2++;
+ }
- LOG ($verbose_filter, " candidate: $u");
- push @candidates, $u;
+ return () if ($#flickr_cache == -1);
+
+ my $n = $#flickr_cache+1;
+ my $i = int(rand($n));
+ my ($img, $page) = @{$flickr_cache[$i]};
+
+ # delete this one from @flickr_cache and from %flickr_cache.
+ #
+ @flickr_cache = ( @flickr_cache[0 .. $i-1],
+ @flickr_cache[$i+1 .. $#flickr_cache] );
+ delete $flickr_cache{$img};
+
+ # Keep the size of the cache under the limit by nuking older entries
+ #
+ while ($#flickr_cache >= $flickr_cache_size) {
+ my $pairP = shift @flickr_cache;
+ my $img = $pairP->[0];
+ delete $flickr_cache{$img};
}
- return pick_image_from_pages ($search_url, $search_hit_count, $#subpages+1,
- $timeout, @candidates);
-}
+ LOG ($verbose_load, "picked image " .($i+1) . "/$n: $img");
+ return ($page, $img);
+}
\f
############################################################################
#
-# Pick images by feeding random words into Lycos
+# Pick images from a random RSS feed on Flickr.
#
############################################################################
-my $lycos_search_url = "http://search.lycos.com/default.asp" .
- "?lpv=1" .
- "&loc=searchhp" .
- "&tab=web" .
- "&query=";
-
-sub pick_from_lycos_text {
- my ( $timeout ) = @_;
+my $flickr_rss_base = ("http://www.flickr.com/services/feeds/photos_public.gne".
+ "?format=rss_200_enc&tagmode=any&tags=");
- # lycos seems to always give us back dictionaries and word lists if
- # we search for more than one word...
- #
- my $words = random_word();
+# Picks a random RSS feed; picks a random image from that feed;
+# returns 2 URLs: the page containing the image, and the image.
+# Mostly by Joe Mcmahon <mcmahon@yahoo-inc.com>
+#
+# flickr_random
+sub pick_from_flickr_random($) {
+ my $timeout = shift;
- my $start = int(rand(8)) * 10 + 1;
- my $search_url = $lycos_search_url . $words . "&first=$start&page=more";
+ my $words = random_words(',');
+ my $rss = $flickr_rss_base . $words;
+ $last_search = $rss;
- my ($search_hit_count, @subpages) =
- pick_from_search_engine ($timeout, $search_url, $words);
+ $_ = $words;
+ s/,/ /g;
- my @candidates = ();
- foreach my $u (@subpages) {
+ print STDERR "\n\n" if ($verbose_load);
+ LOG ($verbose_load, "words: $_");
+ LOG ($verbose_load, "URL: $last_search");
- # Lycos plays redirection games.
- next unless ($u =~ m@^http://click.lycos.com/director.asp
- .*
- \btarget=([^&]+)
- .*
- @x);
- $u = url_decode($1);
+ $suppress_audit = 1;
- LOG ($verbose_filter, " candidate: $u");
- push @candidates, $u;
+ my ( $base, $body ) = get_document ($last_search, undef, $timeout);
+ if (!$base || !$body) {
+ $body = undef;
+ return;
}
- return pick_image_from_pages ($search_url, $search_hit_count, $#subpages+1,
- $timeout, @candidates);
-}
+ my $img;
+ ($base, $img) = pick_image_from_rss ($base, $body);
+ $body = undef;
+ return () unless defined ($img);
+ LOG ($verbose_load, "redirected to: $base");
+ return ($base, $img);
+}
\f
############################################################################
#
-# Pick images by feeding random words into news.yahoo.com
+# Pick random images from Instagram.
#
############################################################################
-my $yahoo_news_url = "http://search.news.yahoo.com/search/news" .
- "?a=1" .
- "&c=news_photos" .
- "&s=-%24s%2C-date" .
- "&n=100" .
- "&o=o" .
- "&2=" .
- "&3=" .
- "&p=";
-
-# yahoonews
-sub pick_from_yahoo_news_text {
- my ( $timeout ) = @_;
-
- my $words = random_words(0);
- my $search_url = $yahoo_news_url . $words;
+my $instagram_url_base = "https://api.instagram.com/v1/media/popular";
+
+# instagram_random
+sub pick_from_instagram($) {
+ my $timeout = shift;
+
+ # Liberated access tokens.
+ # jsdo.it search for: instagram client_id
+ # Google search for: instagram "&client_id=" site:jsfiddle.net
+ my @tokens = ('b59fbe4563944b6c88cced13495c0f49', # gramfeed.com
+ 'fa26679250df49c48a33fbcf30aae989', # instac.at
+ 'd9494686198d4dfeb954979a3e270e5e', # iconosquare.com
+ '793ef48bb18e4197b61afce2d799b81c', # jsdo.it
+ '67b8a3e0073449bba70600d0fc68e6cb', # jsdo.it
+ '26a098e0df4d4b9ea8b4ce6c505b7742', # jsdo.it
+ '2437cbcd906a4c10940f990d283d3cd5', # jsdo.it
+ '191c7d7d5312464cbd92134f36ffdab5', # jsdo.it
+ 'acfec809437b4340b2c38f66503af774', # jsdo.it
+ 'e9f77604a3a24beba949c12d18130988', # jsdo.it
+ '2cd7bcf68ae346529770073d311575b3', # jsdo.it
+ '830c600fe8d742e2ab3f3b94f9bb22b7', # jsdo.it
+ '55865a0397ad41e5997dd95ef4df8da1', # jsdo.it
+ '192a5742f3644ea8bed1d25e439286a8', # jsdo.it
+ '38ed1477e7a44595861b8842cdb8ba23', # jsdo.it
+ 'e52f79f645f54488ad0cc47f6f55ade6', # jsfiddle.net
+ );
+
+ my $tok = $tokens[int(rand($#tokens+1))];
+ $last_search = $instagram_url_base . "?client_id=" . $tok;
- my ($search_hit_count, @subpages) =
- pick_from_search_engine ($timeout, $search_url, $words);
+ print STDERR "\n\n" if ($verbose_load);
+ LOG ($verbose_load, "URL: $last_search");
- my @candidates = ();
- foreach my $u (@subpages) {
- # only accept URLs on Yahoo's news site
- next unless ($u =~ m@^http://dailynews\.yahoo\.com/@i ||
- $u =~ m@^http://story\.news\.yahoo\.com/@i);
+ my ( $base, $body ) = get_document ($last_search, undef, $timeout);
+ if (!$base || !$body) {
+ $body = undef;
+ return;
+ }
- LOG ($verbose_filter, " candidate: $u");
- push @candidates, $u;
+ $body =~ s/("link")/\001$1/gs;
+ my @chunks = split(/\001/, $body);
+ shift @chunks;
+ my @urls = ();
+ foreach (@chunks) {
+ s/\\//gs;
+ my ($url) = m/"link":\s*"(.*?)"/s;
+ my ($img) = m/"standard_resolution":\{"url":\s*"(.*?)"/s;
+      ($img) = m/"url":\s*"(.*?)"/s unless $img;
+ next unless ($url && $img);
+ push @urls, [ $url, $img ];
}
- return pick_image_from_pages ($search_url, $search_hit_count, $#subpages+1,
- $timeout, @candidates);
-}
+ if ($#urls < 0) {
+ LOG ($verbose_load, "no images on $last_search");
+ return ();
+ }
+ my $i = int(rand($#urls+1));
+ my ($url, $img) = @{$urls[$i]};
+ LOG ($verbose_load, "picked image " .($i+1) . "/" . ($#urls+1) . ": $url");
+ return ($url, $img);
+}
\f
############################################################################
############################################################################
# driftnet
-sub pick_from_driftnet {
- my ( $timeout ) = @_;
+sub pick_from_driftnet($) {
+ my ($timeout) = @_;
my $id = $driftnet_magic;
my $dir = $driftnet_dir;
$last_search = $id;
while ($now = time, $now < $start + $timeout) {
- local *DIR;
- opendir (DIR, $dir) || error ("$dir: $!");
- while (my $file = readdir(DIR)) {
+    opendir (my $dh, $dir) || error ("$dir: $!");
+    while (my $file = readdir($dh)) {
       next if ($file =~ m/^\./);
       $file = "$dir/$file";
-      closedir DIR;
+      closedir ($dh);
       LOG ($verbose_load, "picked file $file ($id)");
       return ($id, $file);
     }
-    closedir DIR;
+    closedir ($dh);
}
LOG (($verbose_net || $verbose_load), "timed out for $id");
return ();
}
-sub get_driftnet_file {
+sub get_driftnet_file($) {
my ($file) = @_;
error ("\$driftnet_dir unset?") unless ($driftnet_dir);
my $id = $driftnet_magic;
- my $re = qr/$driftnet_dir/;
error ("$id: $file not in $driftnet_dir?")
- unless ($file =~ m@^$re@o);
+ unless ($file =~ m@^\Q$driftnet_dir@o);
- local *IN;
- open (IN, $file) || error ("$id: $file: $!");
+ open (my $in, '<', $file) || error ("$id: $file: $!");
my $body = '';
- while (<IN>) { $body .= $_; }
- close IN || error ("$id: $file: $!");
+ local $/ = undef; # read entire file
+ $body = <$in>;
+ close ($in) || error ("$id: $file: $!");
unlink ($file) || error ("$id: $file: rm: $!");
return ($id, $body);
}
-sub spawn_driftnet {
+sub spawn_driftnet($) {
my ($cmd) = @_;
# make a directory to use.
unless (1 == kill (0, $pid));
}
+# local-directory
+sub pick_from_local_dir($) {
+ my ($timeout) = @_;
+
+ my $id = $local_magic;
+ $last_search = $id;
+
+ my $dir = $local_dir;
+ error ("\$local_dir unset?") unless ($dir);
+ $dir =~ s@/+$@@;
+
+ error ("$dir unreadable") unless (-d "$dir/.");
+
+ my $v = ($verbose_exec ? "-v" : "");
+ my $pick = `xscreensaver-getimage-file $v "$dir"`;
+ $pick =~ s/\s+$//s;
+ $pick = "$dir/$pick" unless ($pick =~ m@^/@s); # relative path
+
+ LOG ($verbose_load, "picked file $pick ($id)");
+ return ($id, $pick);
+}
+
+
+sub get_local_file($) {
+ my ($file) = @_;
+
+ error ("\$local_dir unset?") unless ($local_dir);
+
+ my $id = $local_magic;
+ error ("$id: $file not in $local_dir?")
+ unless ($file =~ m@^\Q$local_dir@o);
+
+ open (my $in, '<', $file) || error ("$id: $file: $!");
+ local $/ = undef; # read entire file
+ my $body = <$in>;
+ close ($in) || error ("$id: $file: $!");
+ return ($id, $body);
+}
+
+
\f
############################################################################
#
# Returns () if nothing found this time.
#
-sub pick_image {
- my ( $timeout ) = @_;
+sub pick_image(;$) {
+ my ($timeout) = @_;
$current_state = "select";
$load_method = "none";
#
############################################################################
-sub timestr {
+sub timestr() {
return strftime ("%H:%M:%S: ", localtime);
}
-sub blurb {
+sub blurb() {
return "$progname: " . timestr() . "$current_state: ";
}
-sub error {
+sub error($) {
my ($err) = @_;
print STDERR blurb() . "$err\n";
exit 1;
}
+sub stacktrace() {
+ my $i = 1;
+ print STDERR "$progname: stack trace:\n";
+ while (1) {
+ my ($package, $filename, $line, $subroutine) = caller($i++);
+ last unless defined($package);
+ $filename =~ s@^.*/@@;
+ print STDERR " $filename#$line, $subroutine\n";
+ }
+}
+
my $lastlog = "";
-sub clearlog {
+sub clearlog() {
$lastlog = "";
}
-sub showlog {
+sub showlog() {
my $head = "$progname: DEBUG: ";
foreach (split (/\n/, $lastlog)) {
print STDERR "$head$_\n";
$lastlog = "";
}
-sub LOG {
+sub LOG($$) {
my ($print, $msg) = @_;
my $blurb = timestr() . "$current_state: ";
$lastlog .= "$blurb$msg\n";
my %stats_elapsed;
my $last_state = undef;
-sub record_attempt {
+sub record_attempt($) {
my ($name) = @_;
if ($last_state) {
$suppress_audit = 0;
}
-sub record_success {
+sub record_success($$$) {
my ($name, $url, $base) = @_;
if (defined($stats_successes{$name})) {
$stats_successes{$name}++;
}
-sub record_failure {
+sub record_failure($) {
my ($name) = @_;
return if $image_succeeded;
-sub stats_of {
+sub stats_of($) {
my ($name) = @_;
my $i = $stats_successes{$name};
my $j = $stats_attempts{$name};
my $current_start_time = 0;
-sub start_timer {
+sub start_timer($) {
my ($name) = @_;
$current_start_time = time;
}
}
-sub stop_timer {
+sub stop_timer($$) {
my ($name, $success) = @_;
$stats_elapsed{$name} += time - $current_start_time;
}
my $last_report_time = 0;
-sub report_performance {
+sub report_performance() {
return unless $verbose_warnings;
my $suc = $stats_successes{$name} || 0;
my $pct = int($suc * 100 / $try);
my $secs = $stats_elapsed{$name};
- my $secs_link = int($secs / $try);
- print STDERR sprintf ("$blurb %-12s %4s (%d/%d);\t %2d secs/link\n",
+ my $secs_link = $secs / $try;
+ print STDERR sprintf ("$blurb %-14s %4s (%d/%d);" .
+ " \t %.1f secs/link\n",
"$name:", "$pct%", $suc, $try, $secs_link);
}
}
my @recent_images = ();
my @recent_sites = ();
-sub save_recent_url {
+sub save_recent_url($$) {
my ($url, $base) = @_;
return unless ($verbose_warnings);
$_ = $url;
- my ($site) = m@^http://([^ \t\n\r/:]+)@;
+ my ($site) = m@^https?://([^ \t\n\r/:]+)@;
return unless defined ($site);
- if ($base eq $driftnet_magic) {
- $site = $driftnet_magic;
+ if ($base eq $driftnet_magic || $base eq $local_magic) {
+ $site = $base;
@recent_images = ();
}
# Does %-decoding.
#
-sub url_decode {
+sub url_decode($) {
($_) = @_;
tr/+/ /;
s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg;
# Given the raw body of a GIF document, returns the dimensions of the image.
#
-sub gif_size {
+sub gif_size($) {
my ($body) = @_;
my $type = substr($body, 0, 6);
my $s;
return () unless ($type =~ /GIF8[7,9]a/);
$s = substr ($body, 6, 10);
my ($a,$b,$c,$d) = unpack ("C"x4, $s);
+ return () unless defined ($d);
return (($b<<8|$a), ($d<<8|$c));
}
# Given the raw body of a JPEG document, returns the dimensions of the image.
#
-sub jpeg_size {
+sub jpeg_size($) {
my ($body) = @_;
my $i = 0;
my $L = length($body);
return ();
}
-# Given the raw body of a GIF or JPEG document, returns the dimensions of
-# the image.
+# Given the raw body of a PNG document, returns the dimensions of the image.
+#
+sub png_size($) {
+ my ($body) = @_;
+ return () unless ($body =~ m/^\211PNG\r/);
+ my ($bits) = ($body =~ m/^.{12}(.{12})/s);
+ return () unless defined ($bits);
+ return () unless ($bits =~ /^IHDR/);
+ my ($ign, $w, $h) = unpack("a4N2", $bits);
+ return ($w, $h);
+}
+
+
+# Given the raw body of a GIF, JPEG, or PNG document, returns the dimensions
+# of the image.
#
-sub image_size {
+sub image_size($) {
my ($body) = @_;
my ($w, $h) = gif_size ($body);
if ($w && $h) { return ($w, $h); }
- return jpeg_size ($body);
+ ($w, $h) = jpeg_size ($body);
+ if ($w && $h) { return ($w, $h); }
+ return png_size ($body);
}
# returns the full path of the named program, or undef.
#
-sub which {
+sub which($) {
my ($prog) = @_;
foreach (split (/:/, $ENV{PATH})) {
- if (-x "$_/$prog") {
- return $prog;
+ my $path = "$_/$prog";
+ if (-x $path) {
+ return $path;
}
}
return undef;
# Like rand(), but chooses numbers with a bell curve distribution.
-sub bellrand {
+sub bellrand(;$) {
($_) = @_;
$_ = 1.0 unless defined($_);
$_ /= 3.0;
}
-sub signal_cleanup {
- my ($sig) = @_;
- print STDERR blurb() . (defined($sig)
- ? "caught signal $sig."
- : "exiting.")
- . "\n"
- if ($verbose_exec);
-
+sub exit_cleanup() {
x_cleanup();
-
+ print STDERR "$progname: exiting\n" if ($verbose_warnings);
if (@pids_to_kill) {
print STDERR blurb() . "killing: " . join(' ', @pids_to_kill) . "\n";
kill ('TERM', @pids_to_kill);
}
+}
+sub signal_cleanup($) {
+ my ($sig) = @_;
+ print STDERR blurb() . (defined($sig)
+ ? "caught signal $sig."
+ : "exiting.")
+ . "\n"
+ if ($verbose_exec || $verbose_warnings);
exit 1;
}
+
+
##############################################################################
#
# Generating a list of urls only
#
##############################################################################
-sub url_only_output {
+sub url_only_output() {
do {
my ($base, $img) = pick_image;
if ($img) {
##############################################################################
#
-# Running as an xscreensaver module
+# Running as an xscreensaver module, or as a web page imagemap
#
##############################################################################
-my $image_ppm = ($ENV{TMPDIR} ? $ENV{TMPDIR} : "/tmp") . "/webcollage." . $$;
-my $image_tmp1 = $image_ppm . "-1";
-my $image_tmp2 = $image_ppm . "-2";
+my ($image_ppm, $image_tmp1, $image_tmp2);
+{
+ my $seed = rand(0xFFFFFFFF);
+ $image_ppm = sprintf ("%s/webcollage-%08x",
+ ($ENV{TMPDIR} ? $ENV{TMPDIR} : "/tmp"),
+ $seed);
+ $image_tmp1 = $image_ppm . '-1.ppm';
+ $image_tmp2 = $image_ppm . '-2.ppm';
+ $image_ppm .= '.ppm';
+}
+
my $filter_cmd = undef;
my $post_filter_cmd = undef;
my $background = undef;
+my @imagemap_areas = ();
+my $imagemap_html_tmp = undef;
+my $imagemap_jpg_tmp = undef;
+
+
my $img_width; # size of the image being generated.
my $img_height;
my $delay = 2;
-sub x_cleanup {
+sub x_cleanup() {
unlink $image_ppm, $image_tmp1, $image_tmp2;
+ unlink $imagemap_html_tmp, $imagemap_jpg_tmp
+ if (defined ($imagemap_html_tmp));
}
# Like system, but prints status about exit codes, and kills this process
# with whatever signal killed the sub-process, if any.
#
-sub nontrapping_system {
+sub nontrapping_system(@) {
$! = 0;
$_ = join(" ", @_);
}
-# Given the URL of a GIF or JPEG image, and the body of that image, writes a
-# PPM to the given output file. Returns the width/height of the image if
-# successful.
+# Given the URL of a GIF, JPEG, or PNG image, and the body of that image,
+# writes a PPM to the given output file. Returns the width/height of the
+# image if successful.
#
-sub image_to_pnm {
+sub image_to_pnm($$$) {
my ($url, $body, $output) = @_;
my ($cmd, $cmd2, $w, $h);
} elsif ((@_ = jpeg_size ($body))) {
($w, $h) = @_;
$cmd = "djpeg";
+ } elsif ((@_ = png_size ($body))) {
+ ($w, $h) = @_;
+ $cmd = "pngtopnm";
} else {
LOG (($verbose_pbm || $verbose_load),
- "not a GIF or JPG" .
- (($body =~ m@<(base|html|head|body|script|table|a href)>@i)
+ "not a GIF, JPG, or PNG" .
+ (($body =~ m@<(base|html|head|body|script|table|a href)\b@i)
? " (looks like HTML)" : "") .
": $url");
$suppress_audit = 1;
$body = undef;
};
- if (($pid = open(PIPE, "| $cmd2 > $output"))) {
+ if (($pid = open (my $pipe, "| $cmd2 > $output"))) {
$timed_out = 0;
alarm $cvt_timeout;
- print PIPE $body;
+ print $pipe $body;
$body = undef;
- close PIPE;
+ close $pipe;
LOG ($verbose_exec, "awaiting $pid");
waitpid ($pid, 0);
}
}
-sub pick_root_displayer {
+
+# Same as the "ppmmake" command: creates a solid-colored PPM.
+# Does not understand the rgb.txt color names except "black" and "white".
+#
+sub ppmmake($$$$) {
+ my ($outfile, $bgcolor, $w, $h) = @_;
+
+ my ($r, $g, $b);
+ if ($bgcolor =~ m/^\#?([\dA-F][\dA-F])([\dA-F][\dA-F])([\dA-F][\dA-F])$/i ||
+ $bgcolor =~ m/^\#?([\dA-F])([\dA-F])([\dA-F])$/i) {
+ ($r, $g, $b) = (hex($1), hex($2), hex($3));
+ } elsif ($bgcolor =~ m/^black$/i) {
+ ($r, $g, $b) = (0, 0, 0);
+ } elsif ($bgcolor =~ m/^white$/i) {
+ ($r, $g, $b) = (0xFF, 0xFF, 0xFF);
+ } else {
+ error ("unparsable color name: $bgcolor");
+ }
+
+ my $pixel = pack('CCC', $r, $g, $b);
+ my $bits = "P6\n$w $h\n255\n" . ($pixel x ($w * $h));
+
+ open (my $out, '>', $outfile) || error ("$outfile: $!");
+ print $out $bits;
+ close $out;
+}
+
+
+sub pick_root_displayer() {
my @names = ();
+ if ($cocoa_p) {
+ # see "xscreensaver/hacks/webcollage-cocoa.m"
+ return "echo COCOA LOAD ";
+ }
+
foreach my $cmd (@root_displayers) {
$_ = $cmd;
my ($name) = m/^([^ ]+)/;
my $ppm_to_root_window_cmd = undef;
-sub x_or_pbm_output {
+sub x_or_pbm_output($) {
+ my ($window_id) = @_;
+
+ # Adjust the PATH for OS X 10.10.
+ #
+ $_ = $0;
+ s:/[^/]*$::;
+ s/([^a-zA-Z0-9._\-+\/])/\\$1/g;
+ $ENV{PATH} = "$_:$ENV{PATH}";
# Check for our helper program, to see whether we need to use PPM pipelines.
#
$_ = "webcollage-helper";
- if (defined ($webcollage_helper) || which ($_)) {
- $webcollage_helper = $_ unless (defined($webcollage_helper));
+
+ if (! defined ($webcollage_helper)) {
+ $webcollage_helper = which ($_);
+ }
+
+ if (defined ($webcollage_helper)) {
LOG ($verbose_pbm, "found \"$webcollage_helper\"");
- $webcollage_helper .= " -v";
+ $webcollage_helper = "'$webcollage_helper' -v";
} else {
LOG (($verbose_pbm || $verbose_load), "no $_ program");
}
+ if ($cocoa_p && !defined ($webcollage_helper)) {
+ error ("webcollage-helper not found in Cocoa-mode!");
+ }
+
+
# make sure the various programs we execute exist, right up front.
#
- my @progs = ("ppmmake"); # always need this one
+ my @progs = ();
if (!defined($webcollage_helper)) {
# Only need these others if we don't have the helper.
- @progs = (@progs, "giftopnm", "djpeg", "pnmpaste", "pnmscale", "pnmcut");
+ @progs = (@progs,
+ "giftopnm", "pngtopnm", "djpeg",
+ "pnmpaste", "pnmscale", "pnmcut");
}
foreach (@progs) {
which ($_) || error "$_ not found on \$PATH.";
}
+ # If we're using webcollage-helper and not a filter, then the tmp files
+ # are JPEGs, not PPMs.
+ #
+ if (defined ($webcollage_helper) && !defined ($filter_cmd)) {
+ foreach ($image_ppm, $image_tmp1, $image_tmp2) {
+ s/\.ppm$/.jpg/s;
+ }
+ }
+
+
# find a root-window displayer program.
#
- $ppm_to_root_window_cmd = pick_root_displayer();
+ if (!$no_output_p) {
+ $ppm_to_root_window_cmd = pick_root_displayer();
+ }
+
+ if (defined ($window_id)) {
+    error ("--window-id only works if xscreensaver-getimage is installed")
+ unless ($ppm_to_root_window_cmd =~ m/^xscreensaver-getimage\b/);
+
+ error ("unparsable window id: $window_id")
+ unless ($window_id =~ m/^\d+$|^0x[\da-f]+$/i);
+ $ppm_to_root_window_cmd =~ s/--?root\b/$window_id/ ||
+ error ("unable to munge displayer: $ppm_to_root_window_cmd");
+ }
if (!$img_width || !$img_height) {
- $_ = "xdpyinfo";
- which ($_) || error "$_ not found on \$PATH.";
- $_ = `$_`;
- ($img_width, $img_height) = m/dimensions: *(\d+)x(\d+) /;
- if (!defined($img_height)) {
- error "xdpyinfo failed.";
+
+ if (!defined ($window_id) &&
+ defined ($ENV{XSCREENSAVER_WINDOW})) {
+ $window_id = $ENV{XSCREENSAVER_WINDOW};
+ }
+
+ if (!defined ($window_id)) {
+ $_ = "xdpyinfo";
+ which ($_) || error "$_ not found on \$PATH.";
+ $_ = `$_`;
+ ($img_width, $img_height) = m/dimensions: *(\d+)x(\d+) /;
+ if (!defined($img_height)) {
+ error "xdpyinfo failed.";
+ }
+ } else { # we have a window id
+ $_ = "xwininfo";
+ which ($_) || error "$_ not found on \$PATH.";
+ $_ .= " -id $window_id";
+ $_ = `$_`;
+ ($img_width, $img_height) = m/^\s*Width:\s*(\d+)\n\s*Height:\s*(\d+)\n/m;
+
+ if (!defined($img_height)) {
+ error "xwininfo failed.";
+ }
}
}
# Create the sold-colored base image.
#
- $_ = "ppmmake '$bgcolor' $img_width $img_height";
- LOG ($verbose_pbm, "creating base image: $_");
- nontrapping_system "$_ > $image_ppm";
+ LOG ($verbose_pbm, "creating base image: ${img_width}x${img_height}");
+ $_ = ppmmake ($image_ppm, $bgcolor, $img_width, $img_height);
# Paste the default background image in the middle of it.
#
my ($iw, $ih);
my $body = "";
- local *IMG;
- open(IMG, "<$bgimage") || error "couldn't open $bgimage: $!";
- my $cmd;
- while (<IMG>) { $body .= $_; }
- close (IMG);
+ open (my $imgf, '<', $bgimage) || error "couldn't open $bgimage: $!";
+ local $/ = undef; # read entire file
+ $body = <$imgf>;
+ close ($imgf);
+ my $cmd;
if ((@_ = gif_size ($body))) {
($iw, $ih) = @_;
$cmd = "giftopnm |";
($iw, $ih) = @_;
$cmd = "djpeg |";
+ } elsif ((@_ = png_size ($body))) {
+ ($iw, $ih) = @_;
+ $cmd = "pngtopnm |";
+
} elsif ($body =~ m/^P\d\n(\d+) (\d+)\n/) {
$iw = $1;
$ih = $2;
$cmd = "";
} else {
- error "$bgimage is not a GIF, JPEG, or PPM.";
+ error "$bgimage is not a GIF, JPEG, PNG, or PPM.";
}
my $x = int (($img_width - $iw) / 2);
"pasting $bgimage (${iw}x$ih) into base image at $x,$y");
$cmd .= "pnmpaste - $x $y $image_ppm > $image_tmp1";
- open (IMG, "| $cmd") || error "running $cmd: $!";
- print IMG $body;
+ open ($imgf, "| $cmd") || error "running $cmd: $!";
+ print $imgf $body;
$body = undef;
- close (IMG);
+ close ($imgf);
LOG ($verbose_exec, "subproc exited normally.");
rename ($image_tmp1, $image_ppm) ||
error "renaming $image_tmp1 to $image_ppm: $!";
}
}
-sub paste_image {
+sub paste_image($$$$) {
my ($base, $img, $body, $source) = @_;
$current_state = "paste";
($iw, $ih) = image_size ($body);
if (!$iw || !$ih) {
LOG (($verbose_pbm || $verbose_load),
- "not a GIF or JPG" .
+ "not a GIF, JPG, or PNG" .
(($body =~ m@<(base|html|head|body|script|table|a href)>@i)
? " (looks like HTML)" : "") .
": $img");
return 0;
}
- local *OUT;
- open (OUT, ">$image_tmp1") || error ("writing $image_tmp1: $!");
- print OUT $body || error ("writing $image_tmp1: $!");
- close OUT || error ("writing $image_tmp1: $!");
+ if ($iw <= 0 || $ih <= 0 || $iw > 9999 || $ih > 9999) {
+ LOG (($verbose_pbm || $verbose_load),
+ "ludicrous image dimensions: $iw x $ih (" . length($body) .
+ "): $img");
+ $body = undef;
+ return 0;
+ }
+
+ open (my $out, '>', $image_tmp1) || error ("writing $image_tmp1: $!");
+ (print $out $body) || error ("writing $image_tmp1: $!");
+ close ($out) || error ("writing $image_tmp1: $!");
} else {
($iw, $ih) = image_to_pnm ($img, $body, $image_tmp1);
rename ($image_tmp2, $image_tmp1);
# re-get the width/height in case the filter resized it.
- local *IMG;
- open(IMG, "<$image_tmp1") || return 0;
- $_ = <IMG>;
- $_ = <IMG>;
+ open (my $imgf, '<', $image_tmp1) || return 0;
+ $_ = <$imgf>;
+ $_ = <$imgf>;
($iw, $ih) = m/^(\d+) (\d+)$/;
- close (IMG);
+ close ($imgf);
return 0 unless ($iw && $ih);
}
- my $target_w = $img_width;
+ my $target_w = $img_width; # max rectangle into which the image must fit
my $target_h = $img_height;
my $cmd = "";
# Usually scale the image to fit on the screen -- but sometimes scale it
- # to fit on half or a quarter of the screen. Note that we don't merely
- # scale it to fit, we instead cut it in half until it fits -- that should
- # give a wider distribution of sizes.
+ # to fit on half or a quarter of the screen. (We do this by reducing the
+ # size of the target rectangle.) Note that the image is not merely scaled
+ # to fit; we instead cut the image in half repeatedly until it fits in the
+ # target rectangle -- that gives a wider distribution of sizes.
#
- if (rand() < 0.3) { $target_w /= 2; $target_h /= 2; $scale /= 2; }
- if (rand() < 0.3) { $target_w /= 2; $target_h /= 2; $scale /= 2; }
+ if (rand() < 0.3) { $target_w /= 2; $target_h /= 2; } # reduce target rect
+ if (rand() < 0.3) { $target_w /= 2; $target_h /= 2; }
if ($iw > $target_w || $ih > $target_h) {
while ($iw > $target_w ||
$ih > $target_h) {
$iw = int($iw / 2);
$ih = int($ih / 2);
+ $scale /= 2;
}
if ($iw <= 10 || $ih <= 10) {
LOG ($verbose_pbm, "scaling to ${iw}x$ih would have been bogus.");
return 0;
}
- LOG ($verbose_pbm, "scaling to ${iw}x$ih");
+ LOG ($verbose_pbm, "scaling to ${iw}x$ih ($scale)");
$cmd .= " | pnmscale -xsize $iw -ysize $ih";
}
# If any cropping needs to happen, add pnmcut.
#
if ($crop_x != 0 || $crop_y != 0 ||
- $crop_w != $iw || $crop_h != $ih) {
+ $crop_w != $iw || $crop_h != $ih) {
$iw = $crop_w;
$ih = $crop_h;
$cmd .= " | pnmcut $crop_x $crop_y $iw $ih";
# the next network retrieval, which is probably a better thing
# to do anyway.
#
- $cmd .= " &";
+ $cmd .= " &" unless ($cocoa_p);
$rc = nontrapping_system ($cmd);
$source .= "-" . stats_of($source);
print STDOUT "image: ${iw}x${ih} @ $x,$y $base $source\n"
if ($verbose_imgmap);
+ if ($imagemap_base) {
+ update_imagemap ($base, $x, $y, $iw, $ih,
+ $image_ppm, $img_width, $img_height);
+ }
clearlog();
}
-sub init_signals {
+sub update_imagemap($$$$$$$$) {
+ my ($url, $x, $y, $w, $h, $image_ppm, $image_width, $image_height) = @_;
+
+ $current_state = "imagemap";
+
+ my $max_areas = 200;
+
+ $url = html_quote ($url);
+ my $x2 = $x + $w;
+ my $y2 = $y + $h;
+ my $area = "<AREA SHAPE=RECT COORDS=\"$x,$y,$x2,$y2\" HREF=\"$url\">";
+ unshift @imagemap_areas, $area; # put one on the front
+ if ($#imagemap_areas >= $max_areas) {
+ pop @imagemap_areas; # take one off the back.
+ }
+
+ LOG ($verbose_pbm, "area: $x,$y,$x2,$y2 (${w}x$h)");
+
+ my $map_name = $imagemap_base;
+ $map_name =~ s@^.*/@@;
+ $map_name = 'collage' if ($map_name eq '');
+
+ my $imagemap_html = $imagemap_base . ".html";
+ my $imagemap_jpg = $imagemap_base . ".jpg";
+ my $imagemap_jpg2 = $imagemap_jpg;
+ $imagemap_jpg2 =~ s@^.*/@@gs;
+
+ if (!defined ($imagemap_html_tmp)) {
+ $imagemap_html_tmp = $imagemap_html . sprintf (".%08x", rand(0xffffffff));
+ $imagemap_jpg_tmp = $imagemap_jpg . sprintf (".%08x", rand(0xffffffff));
+ }
+
+ # Read the imagemap html file (if any) to get a template.
+ #
+ my $template_html = '';
+ {
+ if (open (my $in, '<', $imagemap_html)) {
+ local $/ = undef; # read entire file
+ $template_html = <$in>;
+ close $in;
+ LOG ($verbose_pbm, "read template $imagemap_html");
+ }
+
+ if ($template_html =~ m/^\s*$/s) {
+ $template_html = ("<MAP NAME=\"$map_name\"></MAP>\n" .
+ "<IMG SRC=\"$imagemap_jpg2\"" .
+ " USEMAP=\"$map_name\">\n");
+ LOG ($verbose_pbm, "created dummy template");
+ }
+ }
+
+ # Write the jpg to a tmp file
+ #
+ {
+ my $cmd;
+ if (defined ($webcollage_helper)) {
+ $cmd = "cp -p $image_ppm $imagemap_jpg_tmp";
+ } else {
+ $cmd = "cjpeg < $image_ppm > $imagemap_jpg_tmp";
+ }
+ my $rc = nontrapping_system ($cmd);
+ if ($rc != 0) {
+ error ("imagemap jpeg failed: \"$cmd\"\n");
+ }
+ }
+
+ # Write the html to a tmp file
+ #
+ {
+ my $body = $template_html;
+ my $areas = join ("\n\t", @imagemap_areas);
+ my $map = ("<MAP NAME=\"$map_name\">\n\t$areas\n</MAP>");
+ my $img = ("<IMG SRC=\"$imagemap_jpg2\" " .
+ "BORDER=0 " .
+ "WIDTH=$image_width HEIGHT=$image_height " .
+ "USEMAP=\"#$map_name\">");
+ $body =~ s@(<MAP\s+NAME=\"[^\"]*\"\s*>).*?(</MAP>)@$map@is;
+ $body =~ s@<IMG\b[^<>]*\bUSEMAP\b[^<>]*>@$img@is;
+
+ # if there are magic webcollage spans in the html, update those too.
+ #
+ {
+ my @st = stat ($imagemap_jpg_tmp);
+ my $date = strftime("%d-%b-%Y %l:%M:%S %p %Z", localtime($st[9]));
+ my $size = int(($st[7] / 1024) + 0.5) . "K";
+ $body =~ s@(<SPAN\s+CLASS=\"webcollage_date\">).*?(</SPAN>)@$1$date$2@si;
+ $body =~ s@(<SPAN\s+CLASS=\"webcollage_size\">).*?(</SPAN>)@$1$size$2@si;
+ }
+
+ open (my $out, '>', $imagemap_html_tmp) || error ("$imagemap_html_tmp: $!");
+ (print $out $body) || error ("$imagemap_html_tmp: $!");
+ close ($out) || error ("$imagemap_html_tmp: $!");
+ LOG ($verbose_pbm, "wrote $imagemap_html_tmp");
+ }
+
+ # Rename the two tmp files to the real files
+ #
+ rename ($imagemap_html_tmp, $imagemap_html) ||
+ error "renaming $imagemap_html_tmp to $imagemap_html";
+ LOG ($verbose_pbm, "wrote $imagemap_html");
+ rename ($imagemap_jpg_tmp, $imagemap_jpg) ||
+ error "renaming $imagemap_jpg_tmp to $imagemap_jpg";
+ LOG ($verbose_pbm, "wrote $imagemap_jpg");
+}
+
+
+# Figure out what the proxy server should be, either from environment
+# variables or by parsing the output of the (MacOS) program "scutil",
+# which tells us what the system-wide proxy settings are.
+#
+sub set_proxy() {
+
+ if (! defined($http_proxy)) {
+ # historical suckage: the environment variable name is lower case.
+ $http_proxy = $ENV{http_proxy} || $ENV{HTTP_PROXY};
+ }
+
+ if (defined ($http_proxy)) {
+ if ($http_proxy && $http_proxy =~ m@^https?://([^/]*)/?$@ ) {
+ # historical suckage: allow "http://host:port" as well as "host:port".
+ $http_proxy = $1;
+ }
+
+ } else {
+ my $proxy_data = `scutil --proxy 2>/dev/null`;
+ my ($server) = ($proxy_data =~ m/\bHTTPProxy\s*:\s*([^\s]+)/s);
+ my ($port) = ($proxy_data =~ m/\bHTTPPort\s*:\s*([^\s]+)/s);
+ # Note: this ignores the "ExceptionsList".
+ if ($server) {
+ $http_proxy = $server;
+ $http_proxy .= ":$port" if $port;
+ }
+ }
+
+ delete $ENV{http_proxy};
+ delete $ENV{HTTP_PROXY};
+ delete $ENV{https_proxy};
+ delete $ENV{HTTPS_PROXY};
+ delete $ENV{PERL_LWP_ENV_PROXY};
+
+ if ($http_proxy) {
+ $http_proxy = 'http://' . $http_proxy;
+ LOG ($verbose_net, "proxy server: $http_proxy");
+ } else {
+ $http_proxy = undef; # for --proxy ''
+ }
+}
+
+
+sub init_signals() {
$SIG{HUP} = \&signal_cleanup;
$SIG{INT} = \&signal_cleanup;
$SIG{PIPE} = 'IGNORE';
}
-END { signal_cleanup(); }
+END { exit_cleanup(); }
-sub main {
+sub main() {
$| = 1;
srand(time ^ $$);
$load_method = "none";
my $root_p = 0;
+ my $window_id = undef;
- # historical suckage: the environment variable name is lower case.
- $http_proxy = $ENV{http_proxy} || $ENV{HTTP_PROXY};
-
- while ($_ = $ARGV[0]) {
- shift @ARGV;
- if ($_ eq "-display" ||
- $_ eq "-displ" ||
- $_ eq "-disp" ||
- $_ eq "-dis" ||
- $_ eq "-dpy" ||
- $_ eq "-d") {
+ while ($#ARGV >= 0) {
+ $_ = shift @ARGV;
+ if (m/^--?d(i(s(p(l(a(y)?)?)?)?)?)?$/s) {
$ENV{DISPLAY} = shift @ARGV;
- } elsif ($_ eq "-root") {
+ } elsif (m/^--?root$/s) {
+ $root_p = 1;
+ } elsif (m/^--?window-id$/s) {
+ $window_id = shift @ARGV;
$root_p = 1;
- } elsif ($_ eq "-no-output") {
+ } elsif (m/^--?no-output$/s) {
$no_output_p = 1;
- } elsif ($_ eq "-urls-only") {
+ } elsif (m/^--?urls(-only)?$/s) {
$urls_only_p = 1;
$no_output_p = 1;
- } elsif ($_ eq "-verbose") {
+ } elsif (m/^--?cocoa$/s) {
+ $cocoa_p = 1;
+ } elsif (m/^--?imagemap$/s) {
+ $imagemap_base = shift @ARGV;
+ $no_output_p = 1;
+ } elsif (m/^--?verbose$/s) {
$verbose++;
} elsif (m/^-v+$/) {
$verbose += length($_)-1;
- } elsif ($_ eq "-delay") {
+ } elsif (m/^--?delay$/s) {
$delay = shift @ARGV;
- } elsif ($_ eq "-timeout") {
+ } elsif (m/^--?timeout$/s) {
$http_timeout = shift @ARGV;
- } elsif ($_ eq "-filter") {
+ } elsif (m/^--?filter$/s) {
$filter_cmd = shift @ARGV;
- } elsif ($_ eq "-filter2") {
+ } elsif (m/^--?filter2$/s) {
$post_filter_cmd = shift @ARGV;
- } elsif ($_ eq "-background" || $_ eq "-bg") {
+ } elsif (m/^--?(background|bg)$/s) {
$background = shift @ARGV;
- } elsif ($_ eq "-size") {
+ } elsif (m/^--?size$/s) {
$_ = shift @ARGV;
if (m@^(\d+)x(\d+)$@) {
$img_width = $1;
$img_height = $2;
} else {
- error "argument to \"-size\" must be of the form \"640x400\"";
+ error "argument to \"--size\" must be of the form \"640x400\"";
}
- } elsif ($_ eq "-proxy" || $_ eq "-http-proxy") {
+ } elsif (m/^--?(http-)?proxy$/s) {
$http_proxy = shift @ARGV;
- } elsif ($_ eq "-dictionary" || $_ eq "-dict") {
+ } elsif (m/^--?dict(ionary)?$/s) {
$dict = shift @ARGV;
- } elsif ($_ eq "-driftnet" || $_ eq "--driftnet") {
+ } elsif (m/^--?opacity$/s) {
+ $opacity = shift @ARGV;
+ error ("opacity must be between 0.0 and 1.0")
+ if ($opacity <= 0 || $opacity > 1);
+ } elsif (m/^--?driftnet$/s) {
@search_methods = ( 100, "driftnet", \&pick_from_driftnet );
if (! ($ARGV[0] =~ m/^-/)) {
$driftnet_cmd = shift @ARGV;
} else {
$driftnet_cmd = $default_driftnet_cmd;
}
- } elsif ($_ eq "-debug" || $_ eq "--debug") {
+ } elsif (m/^--?dir(ectory)?$/s) {
+ @search_methods = ( 100, "local", \&pick_from_local_dir );
+ if (! ($ARGV[0] =~ m/^-/)) {
+ $local_dir = shift @ARGV;
+ } else {
+ error ("local directory path must be set")
+ }
+ } elsif (m/^--?fps$/s) {
+ # -fps only works on MacOS, via "webcollage-cocoa.m".
+ # Ignore it if passed to this script in an X11 context.
+ } elsif (m/^--?debug$/s) {
my $which = shift @ARGV;
my @rest = @search_methods;
my $ok = 0;
}
error "no such search method as \"$which\"" unless ($ok);
LOG (1, "DEBUG: using only \"$which\"");
+ $report_performance_interval = 30;
} else {
+ print STDERR "unknown option: $_\n\n";
print STDERR "$copyright\nusage: $progname " .
- "[-root] [-display dpy] [-verbose] [-debug which]\n" .
- "\t\t [-timeout secs] [-delay secs] [-filter cmd] [-filter2 cmd]\n" .
- "\t\t [-no-output] [-urls-only] [-background color] [-size WxH]\n" .
- "\t\t [-dictionary dictionary-file] [-http-proxy host[:port]]\n" .
- "\t\t [-driftnet [driftnet-program-and-args]]\n" .
+ "[--root] [--display dpy] [--verbose] [--debug which]\n" .
+ "\t\t [--timeout secs] [--delay secs] [--size WxH]\n" .
+ "\t\t [--no-output] [--urls-only] [--imagemap filename]\n" .
+ "\t\t [--background color] [--opacity f]\n" .
+ "\t\t [--filter cmd] [--filter2 cmd]\n" .
+ "\t\t [--dictionary dictionary-file] [--http-proxy host[:port]]\n" .
+ "\t\t [--driftnet [driftnet-program-and-args]]\n" .
+ "\t\t [--directory local-image-directory]\n" .
"\n";
exit 1;
}
}
- if ($http_proxy && $http_proxy eq "") {
- $http_proxy = undef;
- }
- if ($http_proxy && $http_proxy =~ m@^http://([^/]*)/?$@ ) {
- # historical suckage: allow "http://host:port" as well as "host:port".
- $http_proxy = $1;
- }
-
- if (!$root_p && !$no_output_p) {
+ if (!$root_p && !$no_output_p && !$cocoa_p) {
print STDERR $copyright;
- error "the -root argument is mandatory (for now.)";
+ error "the --root argument is mandatory (for now.)";
}
- if (!$no_output_p && !$ENV{DISPLAY}) {
+ if (!$no_output_p && !$cocoa_p && !$ENV{DISPLAY}) {
error "\$DISPLAY is not set.";
}
pick_dictionary();
}
+ if ($imagemap_base && !($img_width && $img_height)) {
+ error ("--size WxH is required with --imagemap");
+ }
+
+ if (defined ($local_dir)) {
+ $_ = "xscreensaver-getimage-file";
+ which ($_) || error "$_ not found on \$PATH.";
+ }
+
init_signals();
+ set_proxy();
spawn_driftnet ($driftnet_cmd) if ($driftnet_cmd);
if ($urls_only_p) {
- url_only_output;
+ url_only_output ();
} else {
- x_or_pbm_output;
+ x_or_pbm_output ($window_id);
}
}
-main;
+main();
exit (0);