#!/usr/bin/perl -w
#
-# webcollage, Copyright (c) 1999-2002 by Jamie Zawinski <jwz@jwz.org>
+# webcollage, Copyright (c) 1999-2012 by Jamie Zawinski <jwz@jwz.org>
# This program decorates the screen with random images from the web.
# One satisfied customer described it as "a nonstop pop culture brainbath."
#
# software for any purpose. It is provided "as is" without express or
# implied warranty.
+
# To run this as a display mode with xscreensaver, add this to `programs':
#
-# default-n: webcollage -root \n\
-# default-n: webcollage -root -filter 'vidwhacker -stdin -stdout' \n\
+# webcollage -root
+# webcollage -root -filter 'vidwhacker -stdin -stdout'
+#
+#
+# You can see this in action at http://www.jwz.org/webcollage/ --
+# it auto-reloads about once a minute. To make a page similar to
+# that on your own system, do this:
+#
+# webcollage -size '800x600' -imagemap $HOME/www/webcollage/index
+#
+#
+# If you have the "driftnet" program installed, webcollage can display a
+# collage of images sniffed off your local ethernet, instead of pulled out
+# of search engines: in that way, your screensaver can display the images
+# that your co-workers are downloading!
+#
+# Driftnet is available here: http://www.ex-parrot.com/~chris/driftnet/
+# Use it like so:
+#
+# webcollage -root -driftnet
+#
+# Driftnet is the Unix implementation of the MacOS "EtherPEG" program.
require 5;
use Fcntl ':flock'; # import LOCK_* constants
use POSIX qw(strftime);
+use bytes; # Larry can take Unicode and shove it up his ass sideways.
+ # Perl 5.8.0 causes us to start getting incomprehensible
+ # errors about UTF-8 all over the place without this.
+
my $progname = $0; $progname =~ s@.*/@@g;
-my $version = q{ $Revision: 1.87 $ }; $version =~ s/^[^0-9]+([0-9.]+).*$/$1/;
-my $copyright = "WebCollage $version, Copyright (c) 1999-2002" .
+my $version = q{ $Revision: 1.159 $ }; $version =~ s/^[^0-9]+([0-9.]+).*$/$1/;
+my $copyright = "WebCollage $version, Copyright (c) 1999-2011" .
" Jamie Zawinski <jwz\@jwz.org>\n" .
- " http://www.jwz.org/xscreensaver/\n";
-
-
-
-my @search_methods = ( 30, "imagevista", \&pick_from_alta_vista_images,
- 28, "altavista", \&pick_from_alta_vista_text,
- 18, "yahoorand", \&pick_from_yahoo_random_link,
- 14, "googleimgs", \&pick_from_google_images,
- 2, "yahoonews", \&pick_from_yahoo_news_text,
- 8, "lycos", \&pick_from_lycos_text,
-
- # Hotbot gives me "no matches" just about every time.
- # Then I try the same URL again, and it works. I guess
- # it caches searches, and webcollage always busts its
- # cache and time out? Or it just sucks.
- # 0, "hotbot", \&pick_from_hotbot_text,
+ " http://www.jwz.org/webcollage/\n";
+
+
+
+my @search_methods = ( 26, "googlephotos", \&pick_from_google_image_photos,
+ 15, "googleimgs", \&pick_from_google_images,
+ 15, "googlenums", \&pick_from_google_image_numbers,
+ 17, "flickr_recent", \&pick_from_flickr_recent,
+ 14, "flickr_random", \&pick_from_flickr_random,
+# twitpic went stale. don't have time to fix it right now.
+# 10, "twitpic", \&pick_from_twitpic_images,
+ 9, "livejournal", \&pick_from_livejournal_images,
+ 4, "yahoorand", \&pick_from_yahoo_random_link,
+
+ # This one doesn't work very well: too many non-img links.
+ 0, "twitter", \&pick_from_twitter_images,
+
+                       # This is a cute way to search for certain webcams.
+ # Not included in default methods, since these images
+ # aren't terribly interesting by themselves.
+ # See also "SurveillanceSaver".
+ #
+ 0, "securitycam", \&pick_from_security_camera,
+
+ # Nonfunctional as of June 2011.
+ # 0, "altavista", \&pick_from_alta_vista_random_link,
+
+ # In Apr 2002, Google asked me to stop searching them.
+ # I asked them to add a "random link" url. They said
+ # "that would be easy, we'll think about it" and then
+ # never wrote back. Booo Google! Booooo! So, screw
+ # those turkeys, I've turned Google searching back on.
+ # I'm sure they can take it. (Jan 2005.)
+
+ # Jan 2005: Yahoo fucked up their search form so that
+ # it's no longer possible to do "or" searches on news
+ # images, so we rarely get any hits there any more.
+ #
+ # 0, "yahoonews", \&pick_from_yahoo_news_text,
+
+ # Dec 2004: the ircimages guy's server can't take the
+ # heat, so he started banning the webcollage user agent.
+ # I tried to convince him to add a lighter-weight page to
+ # support webcollage better, but he doesn't care.
+ #
+ # 0, "ircimages", \&pick_from_ircimages,
+
+ # Dec 2002: Alta Vista has a new "random link" URL now.
+ # They added it specifically to better support webcollage!
+ # That was super cool of them. This is how we used to do
+ # it, before:
+ #
+ # 0, "avimages", \&pick_from_alta_vista_images,
+ # 0, "avtext", \&pick_from_alta_vista_text,
+
+ # This broke in 2004. Eh, Lycos sucks anyway.
+ #
+ # 0, "lycos", \&pick_from_lycos_text,
+
+ # This broke in 2003, I think. I suspect Hotbot is
+ # actually the same search engine data as Lycos.
+ #
+ # 0, "hotbot", \&pick_from_hotbot_text,
);
# programs we can use to write to the root window (tried in ascending order.)
#
my @root_displayers = (
+ "xscreensaver-getimage -root -file",
"chbg -once -xscreensaver -max_size 100",
- "xv -root -quit -viewonly -noresetroot -quick24 -rmode 5" .
+ "xv -root -quit -viewonly +noresetroot -quick24 -rmode 5" .
" -rfg black -rbg black",
"xli -quiet -onroot -center -border black",
"xloadimage -quiet -onroot -center -border black",
"www.nytimes.com" => 'NYT-S=18cHMIlJOn2Y1bu5xvEG3Ufuk6E1oJ.' .
'FMxWaQV0igaB5Yi/Q/guDnLeoL.pe7i1oakSb' .
'/VqfdUdb2Uo27Vzt1jmPn3cpYRlTw9',
+
+ "ircimages.com" => 'disclaimer=1',
);
#
my %poisoners = (
"die.net" => 1, # 'l33t h4ck3r d00dz.
- "genforum.genealogy.com" => 1, # Cluttering altavista with human names.
- "rootsweb.com" => 1, # Cluttering altavista with human names.
+ "genforum.genealogy.com" => 1, # Cluttering avtext with human names.
+ "rootsweb.com" => 1, # Cluttering avtext with human names.
"akamai.net" => 1, # Lots of sites have their images on Akamai.
- # But those are pretty much all banners.
+ "akamaitech.net" => 1, # But those are pretty much all banners.
# Since Akamai is super-expensive, let's
# go out on a limb and assume that all of
# their customers are rich-and-boring.
- "bartleby.com" => 1, # Dictionary, cluttering altavista.
- "encyclopedia.com" => 1, # Dictionary, cluttering altavista.
- "onlinedictionary.datasegment.com" => 1, # Dictionary, cluttering altavista.
- "hotlinkpics.com" => 1, # Porn site that has poisoned imagevista
+ "bartleby.com" => 1, # Dictionary, cluttering avtext.
+ "encyclopedia.com" => 1, # Dictionary, cluttering avtext.
+ "onlinedictionary.datasegment.com" => 1, # Dictionary, cluttering avtext.
+ "hotlinkpics.com" => 1, # Porn site that has poisoned avimages
# (I don't see how they did it, though!)
"alwayshotels.com" => 1, # Poisoned Lycos pretty heavily.
+ "nextag.com" => 1, # Poisoned Alta Vista real good.
+ "ghettodriveby.com" => 1, # Poisoned Google Images.
+ "crosswordsolver.org" => 1, # Poisoned Google Images.
+ "xona.com" => 1, # Poisoned Google Images.
+ "freepatentsonline.com" => 1, # Poisoned Google Images.
+ "herbdatanz.com" => 1, # Poisoned Google Images.
);
# site" diagnostic message.
#
my %warningless_sites = (
- "home.earthlink.net" => 1, # Lots of home pages here.
- "www.geocities.com" => 1,
+ "home.earthlink.net" => 1,
"www.angelfire.com" => 1,
"members.aol.com" => 1,
+ "img.photobucket.com" => 1,
+ "pics.livejournal.com" => 1,
+ "tinypic.com" => 1,
+ "flickr.com" => 1,
+ "pbase.com" => 1,
+ "blogger.com" => 1,
+ "multiply.com" => 1,
+ "wikimedia.org" => 1,
+ "twitpic.com" => 1,
+ "amazonaws.com" => 1,
+ "blogspot.com" => 1,
+ "photoshelter.com" => 1,
+ "myspacecdn.com" => 1,
+ "feedburner.com" => 1,
+ "wikia.com" => 1,
+ "ljplus.ru" => 1,
+ "yandex.ru" => 1,
+ "imgur.com" => 1,
+ "yfrog.com" => 1,
"yimg.com" => 1, # This is where dailynews.yahoo.com stores
"eimg.com" => 1, # its images, so pick_from_yahoo_news_text()
# hits this every time.
+
+ "images.quizfarm.com" => 1, # damn those LJ quizzes...
+ "images.quizilla.com" => 1,
+ "images.quizdiva.net" => 1,
+
+ "driftnet" => 1, # builtin...
+ "local-directory" => 1, # builtin...
+);
+
+
+# For decoding HTML-encoded character entities to URLs.
+#
+my %entity_table = (
+ "apos" => '\'',
+ "quot" => '"', "amp" => '&', "lt" => '<', "gt" => '>',
+ "nbsp" => ' ', "iexcl" => '¡', "cent" => '¢', "pound" => '£',
+ "curren" => '¤', "yen" => '¥', "brvbar" => '¦', "sect" => '§',
+ "uml" => '¨', "copy" => '©', "ordf" => 'ª', "laquo" => '«',
+ "not" => '¬', "shy" => '', "reg" => '®', "macr" => '¯',
+ "deg" => '°', "plusmn" => '±', "sup2" => '²', "sup3" => '³',
+ "acute" => '´', "micro" => 'µ', "para" => '¶', "middot" => '·',
+ "cedil" => '¸', "sup1" => '¹', "ordm" => 'º', "raquo" => '»',
+ "frac14" => '¼', "frac12" => '½', "frac34" => '¾', "iquest" => '¿',
+ "Agrave" => 'À', "Aacute" => 'Á', "Acirc" => 'Â', "Atilde" => 'Ã',
+ "Auml" => 'Ä', "Aring" => 'Å', "AElig" => 'Æ', "Ccedil" => 'Ç',
+ "Egrave" => 'È', "Eacute" => 'É', "Ecirc" => 'Ê', "Euml" => 'Ë',
+ "Igrave" => 'Ì', "Iacute" => 'Í', "Icirc" => 'Î', "Iuml" => 'Ï',
+ "ETH" => 'Ð', "Ntilde" => 'Ñ', "Ograve" => 'Ò', "Oacute" => 'Ó',
+ "Ocirc" => 'Ô', "Otilde" => 'Õ', "Ouml" => 'Ö', "times" => '×',
+ "Oslash" => 'Ø', "Ugrave" => 'Ù', "Uacute" => 'Ú', "Ucirc" => 'Û',
+ "Uuml" => 'Ü', "Yacute" => 'Ý', "THORN" => 'Þ', "szlig" => 'ß',
+ "agrave" => 'à', "aacute" => 'á', "acirc" => 'â', "atilde" => 'ã',
+ "auml" => 'ä', "aring" => 'å', "aelig" => 'æ', "ccedil" => 'ç',
+ "egrave" => 'è', "eacute" => 'é', "ecirc" => 'ê', "euml" => 'ë',
+ "igrave" => 'ì', "iacute" => 'í', "icirc" => 'î', "iuml" => 'ï',
+ "eth" => 'ð', "ntilde" => 'ñ', "ograve" => 'ò', "oacute" => 'ó',
+ "ocirc" => 'ô', "otilde" => 'õ', "ouml" => 'ö', "divide" => '÷',
+ "oslash" => 'ø', "ugrave" => 'ù', "uacute" => 'ú', "ucirc" => 'û',
+ "uuml" => 'ü', "yacute" => 'ý', "thorn" => 'þ', "yuml" => 'ÿ',
+
+ # HTML 4 entities that do not have 1:1 Latin1 mappings.
+ "bull" => "*", "hellip"=> "...", "prime" => "'", "Prime" => "\"",
+ "frasl" => "/", "trade" => "[tm]", "larr" => "<-", "rarr" => "->",
+ "harr" => "<->", "lArr" => "<=", "rArr" => "=>", "hArr" => "<=>",
+ "empty" => "Ø", "minus" => "-", "lowast"=> "*", "sim" => "~",
+ "cong" => "=~", "asymp" => "~", "ne" => "!=", "equiv" => "==",
+ "le" => "<=", "ge" => ">=", "lang" => "<", "rang" => ">",
+ "loz" => "<>", "OElig" => "OE", "oelig" => "oe", "Yuml" => "Y",
+ "circ" => "^", "tilde" => "~", "ensp" => " ", "emsp" => " ",
+ "thinsp"=> " ", "ndash" => "-", "mdash" => "--", "lsquo" => "`",
+ "rsquo" => "'", "sbquo" => "'", "ldquo" => "\"", "rdquo" => "\"",
+ "bdquo" => "\"", "lsaquo"=> "<", "rsaquo"=> ">",
);
my $report_performance_interval = 60 * 15; # print some stats every 15 minutes
my $http_proxy = undef;
-my $http_timeout = 30;
+my $http_timeout = 20;
my $cvt_timeout = 10;
my $min_width = 50;
my $no_output_p = 0;
my $urls_only_p = 0;
+my $cocoa_p = 0;
+my $imagemap_base = undef;
+
+my @pids_to_kill = (); # forked pids we should kill when we exit, if any.
+
+my $driftnet_magic = 'driftnet';
+my $driftnet_dir = undef;
+my $default_driftnet_cmd = "driftnet -a -m 100";
+
+my $local_magic = 'local-directory';
+my $local_dir = undef;
my $wordlist;
# returns three values: the HTTP response line; the document headers;
# and the document body.
#
-sub get_document_1 {
- my ( $url, $referer, $timeout ) = @_;
+sub get_document_1($$$) {
+ my ($url, $referer, $timeout) = @_;
if (!defined($timeout)) { $timeout = $http_timeout; }
if ($timeout > $http_timeout) { $timeout = $http_timeout; }
my ($url_proto, $dummy, $serverstring, $path) = split(/\//, $url, 4);
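+  # e.g. "http://host:8000/a/b" splits into ("http:", "", "host:8000", "a/b").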
$path = "" unless $path;
+ if (!$url_proto || !$serverstring) {
+ LOG (($verbose_net || $verbose_load), "unparsable URL: $url");
+ return ();
+ }
+
my ($them,$port) = split(/:/, $serverstring);
$port = 80 unless $port;
my $port2 = $port;
if ($http_proxy) {
$serverstring = $http_proxy if $http_proxy;
+ $serverstring =~ s@^[a-z]+://@@;
($them2,$port2) = split(/:/, $serverstring);
$port2 = 80 unless $port2;
}
my $cookie = $cookies{$them};
my $user_agent = "$progname/$version";
- if ($url =~ m@^http://www\.altavista\.com/@) {
+
+ if ($url =~ m@^http://www\.altavista\.com/@ ||
+ $url =~ m@^http://random\.yahoo\.com/@ ||
+ $url =~ m@^http://images\.google\.com/@ ||
+ $url =~ m@^http://www\.google\.com/@) {
# block this, you turkeys.
- $user_agent = "Mozilla/4.76 [en] (X11; U; Linux 2.2.16-22 i686; Nav)";
+ $user_agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.7)" .
+ " Gecko/20070914 Firefox/2.0.0.7";
+
+ # 28-Jun-2007: Google Images now emits the entire page in JS if
+ # you claim to be Gecko. They also still block "webcollage".
+ # They serve non-JS for unrecognised agents, so let's try this...
+ $user_agent = "NoJavascriptPlease/1.0"
+ if ($url =~ m@^http://[a-z]+\.google\.com/@);
}
my $hdrs = "GET " . ($http_proxy ? $url : "/$path") . " HTTP/1.0\r\n" .
print S $hdrs;
my $http = <S> || "";
+ # Kludge: the Yahoo Random Link is now returning as its first
+ # line "Status: 301" instead of "HTTP/1.0 301 Found". Fix it...
+ #
+ $http =~ s@^Status:\s+(\d+)\b@HTTP/1.0 $1@i;
+
$_ = $http;
s/[\r\n]+$//s;
LOG ($verbose_http, " <== $_");
return ();
}
+ $SIG{ALRM} = 'DEFAULT'; # seem to be suffering a race?
return ( $http, $head, $body );
};
-  die if ($@ && $@ ne "alarm\n");       # propagate errors
+
+ if ($@ && $@ ne "alarm\n") {
+ print STDERR blurb() . "DIE " . join(" ", $@) . "\n";
+ die;
+ }
+
if ($@) {
# timed out
$head = undef;
# returns two values: the document headers; and the document body.
# if the given URL did a redirect, returns the redirected-to document.
#
-sub get_document {
- my ( $url, $referer, $timeout ) = @_;
+sub get_document($$;$) {
+ my ($url, $referer, $timeout) = @_;
my $start = time;
+ if (defined($referer) && $referer eq $driftnet_magic) {
+ return get_driftnet_file ($url);
+ }
+
+ if (defined($referer) && $referer eq $local_magic) {
+ return get_local_file ($url);
+ }
+
my $orig_url = $url;
my $loop_count = 0;
my $max_loop_count = 4;
if ( $http =~ m@^HTTP/[0-9.]+ 30[123]@ ) {
$_ = $head;
+
my ( $location ) = m@^location:[ \t]*(.*)$@im;
if ( $location ) {
$location =~ s/[\r\n]$//;
# in again, but you have to present the old cookie to get the new cookie.
# So, by doing this, the built-in cypherpunks cookie will never go "stale".
#
-sub set_cookie {
+sub set_cookie($$) {
my ($host, $cookie) = @_;
my $oc = $cookies{$host};
return unless $oc;
# given a URL and the body text at that URL, selects and returns a random
# image from it. returns () if no suitable images found.
#
-sub pick_image_from_body {
- my ( $url, $body ) = @_;
+sub pick_image_from_body($$) {
+ my ($url, $body) = @_;
my $base = $url;
$_ = $url;
# randomly from the set of images on the web. All the logic here for
# rejecting some images is really a set of heuristics for rejecting
# images that are not really images: for rejecting *text* that is in
- # GIF/JPEG form. I don't want text, I want pictures, and I want the
- # content of the pictures to be randomly selected from among all the
- # available content.
+ # GIF/JPEG/PNG form. I don't want text, I want pictures, and I want
+ # the content of the pictures to be randomly selected from among all
+ # the available content.
#
# So, filtering out "dirty" pictures by looking for "dirty" keywords
# would be wrong: dirty pictures exist, like it or not, so webcollage
my %unique_urls;
foreach (split(/ *</)) {
- if ( m/^meta /i ) {
+ if ( m/^meta.*["']keywords["']/i ) {
# Likewise, reject any web pages that have a KEYWORDS meta tag
# that is too long.
#
- if (m/name ?= ?\"?keywords\"?/i &&
- m/content ?= ?\"([^\"]+)\"/) {
- my $L = length($1);
- if ($L > 1000) {
- LOG (($verbose_filter || $verbose_load),
- "excessive keywords ($L bytes) in $url: rejecting.");
- $rejected_urls{$url} = $L;
- $body = undef;
- $_ = undef;
- return ();
- } else {
- LOG ($verbose_filter, " keywords ($L bytes) in $url (ok)");
- }
+ my $L = length($_);
+ if ($L > 1000) {
+ LOG (($verbose_filter || $verbose_load),
+ "excessive keywords ($L bytes) in $url: rejecting.");
+ $rejected_urls{$url} = $L;
+ $body = undef;
+ $_ = undef;
+ return ();
+ } else {
+ LOG ($verbose_filter, " keywords ($L bytes) in $url (ok)");
}
- } elsif ( m/^(img|a) .*(src|href) ?= ?\"? ?(.*?)[ >\"]/io ) {
+ } elsif (m/^ (IMG|A) \b .* (SRC|HREF) \s* = \s* ["']? (.*?) [ "'<>] /six ||
+ m/^ (LINK|META) \b .* (REL|PROPERTY) \s* = \s*
+ ["']? (image_src|og:image) ["']? /six) {
- my $was_inline = ( "$1" eq "a" || "$1" eq "A" );
+ my $was_inline = (lc($1) eq 'img');
+ my $was_meta = (lc($1) eq 'link' || lc($1) eq 'meta');
my $link = $3;
+
+ # For <link rel="image_src" href="...">
+ # and <meta property="og:image" content="...">
+ #
+ if ($was_meta) {
+ next unless (m/ (HREF|CONTENT) \s* = \s* ["']? (.*?) [ "'<>] /six);
+ $link = $2;
+ }
+
my ( $width ) = m/width ?=[ \"]*(\d+)/oi;
my ( $height ) = m/height ?=[ \"]*(\d+)/oi;
$_ = $link;
} elsif ( ! m@^[^/:?]+:@ ) {
$_ = "$base$link";
s@/\./@/@g;
- while (s@/\.\./@/@g) {
- }
+ 1 while (s@/[^/]+/\.\./@/@g);
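+      # (e.g. "http://x/a/b/../c" collapses to "http://x/a/c".)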
}
# skip non-http
}
# skip non-image
- if ( ! m@[.](gif|jpg|jpeg|pjpg|pjpeg)$@io ) {
+ if ( ! m@[.](gif|jpg|jpeg|pjpg|pjpeg|png)$@io ) {
next;
}
next;
}
+ # skip images with a URL that indicates a Yahoo thumbnail.
+ if (m@\.yimg\.com/.*/t/@) {
+ if (!$width) { $width = "?"; }
+ if (!$height) { $height = "?"; }
+ LOG ($verbose_filter, " skip yahoo thumb $_ (${width}x$height)");
+ next;
+ }
my $url = $_;
LOG ($verbose_filter,
" image $url" .
($width && $height ? " (${width}x${height})" : "") .
- ($was_inline ? " (inline)" : ""));
+ ($was_meta ? " (meta)" : $was_inline ? " (inline)" : ""));
- $urls[++$#urls] = $url;
- $unique_urls{$url}++;
- # jpegs are preferable to gifs.
- $_ = $url;
- if ( ! m@[.]gif$@io ) {
- $urls[++$#urls] = $url;
+ my $weight = 1;
+
+ if ($was_meta) {
+ $weight = 20; # meta tag images are far preferable to inline images.
+ } else {
+ if ($url !~ m@[.](gif|png)$@io ) {
+ $weight += 2; # JPEGs are preferable to GIFs and PNGs.
+ }
+ if (! $was_inline) {
+ $weight += 4; # pointers to images are preferable to inlined images.
+ }
}
- # pointers to images are preferable to inlined images.
- if ( ! $was_inline ) {
- $urls[++$#urls] = $url;
+ $unique_urls{$url}++;
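+      # Push the URL into the pool once per weight point, so the later
+      # random selection is weighted: e.g. a linked-to JPEG goes in 7
+      # times (1+2+4), while an inline GIF or PNG goes in only once.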
+ for (my $i = 0; $i < $weight; $i++) {
$urls[++$#urls] = $url;
}
}
}
+# Given a URL and the RSS feed from that URL, pick a random image from
+# the feed. This is a lot simpler than extracting images out of a page:
+# we already know we have reasonable images, so we just pick one.
+# Returns: the real URL of the page (preferably not the RSS version),
+# and the image.
+
+sub pick_image_from_rss($$) {
+ my ( $url, $body ) = @_;
+ my @suitable = ($body =~ m/<enclosure url="(.*?)"/g);
+
+ my ($base) = ($body =~ m@<link>([^<>]+)</link>@i);
+ $base = $url unless $base;
+
+ # pick a random element of the table
+ if (@suitable) {
+ my $i = int(rand(scalar @suitable));
+ my $url = $suitable[$i];
+ LOG ($verbose_load, "picked image " .($i+1) . "/" .
+ ($#suitable+1) . ": $url");
+ return ($base, $url);
+ }
+ return;
+}
+
\f
############################################################################
#
############################################################################
-sub pick_dictionary {
+sub pick_dictionary() {
my @dicts = ("/usr/dict/words",
"/usr/share/dict/words",
- "/usr/share/lib/dict/words");
+ "/usr/share/lib/dict/words",
+ "/usr/share/dict/cracklib-small",
+ "/usr/share/dict/cracklib-words"
+ );
foreach my $f (@dicts) {
if (-f $f) {
$wordlist = $f;
# returns a random word from the dictionary
#
-sub random_word {
- my $word = 0;
- if (open (IN, "<$wordlist")) {
- my $size = (stat(IN))[7];
- my $pos = rand $size;
- if (seek (IN, $pos, 0)) {
- $word = <IN>; # toss partial line
- $word = <IN>; # keep next line
- }
- if (!$word) {
- seek( IN, 0, 0 );
- $word = <IN>;
- }
- close (IN);
- }
+sub random_word() {
+
+ return undef unless open (my $in, '<', $wordlist);
- return 0 if (!$word);
+ my $size = (stat($in))[7];
+ my $word = undef;
+ my $count = 0;
- $word =~ s/^[ \t\n\r]+//;
- $word =~ s/[ \t\n\r]+$//;
- $word =~ s/ys$/y/;
- $word =~ s/ally$//;
- $word =~ s/ly$//;
- $word =~ s/ies$/y/;
- $word =~ s/ally$/al/;
- $word =~ s/izes$/ize/;
- $word =~ tr/A-Z/a-z/;
+ while (1) {
+ error ("looping ($count) while reading $wordlist")
+ if (++$count > 100);
- if ( $word =~ s/[ \t\n\r]/\+/g ) { # convert intra-word spaces to "+".
- $word = "\%22$word\%22"; # And put quotes (%22) around it.
+ my $pos = int (rand ($size));
+ if (seek ($in, $pos, 0)) {
+ $word = <$in>; # toss partial line
+ $word = <$in>; # keep next line
}
- return $word;
+ next unless ($word);
+ next if ($word =~ m/^[-\']/);
+
+ $word = lc($word);
+ $word =~ s/^.*-//s;
+ $word =~ s/^[^a-z]+//s;
+ $word =~ s/[^a-z]+$//s;
+ $word =~ s/\'s$//s;
+ $word =~ s/ys$/y/s;
+ $word =~ s/ally$//s;
+ $word =~ s/ly$//s;
+ $word =~ s/ies$/y/s;
+ $word =~ s/ally$/al/s;
+ $word =~ s/izes$/ize/s;
+ $word =~ s/esses$/ess/s;
+ $word =~ s/(.{5})ing$/$1/s;
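+    # e.g. "families" => "family", "cat's" => "cat", "wandering" => "wander".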
+
+ next if (length ($word) > 14);
+ last if ($word);
+ }
+
+ close ($in);
+
+ if ( $word =~ s/\s/\+/gs ) { # convert intra-word spaces to "+".
+ $word = "\%22$word\%22"; # And put quotes (%22) around it.
+ }
+
+ return $word;
}
-sub random_words {
- my ($or_p) = @_;
- my $sep = ($or_p ? "%20OR%20" : "%20");
- return (random_word . $sep .
- random_word . $sep .
- random_word . $sep .
- random_word . $sep .
- random_word);
+
+sub random_words($) {
+ my ($sep) = @_;
+ return (random_word() . $sep .
+ random_word() . $sep .
+ random_word() . $sep .
+ random_word() . $sep .
+ random_word());
}
-sub url_quote {
+sub url_quote($) {
my ($s) = @_;
$s =~ s|([^-a-zA-Z0-9.\@/_\r\n])|sprintf("%%%02X", ord($1))|ge;
return $s;
}
-sub url_unquote {
+sub url_unquote($) {
my ($s) = @_;
$s =~ s/[+]/ /g;
$s =~ s/%([a-z0-9]{2})/chr(hex($1))/ige;
return $s;
}
+sub html_quote($) {
+ my ($s) = @_;
+  $s =~ s/&/&amp;/gi;
+  $s =~ s/</&lt;/gi;
+  $s =~ s/>/&gt;/gi;
+  $s =~ s/\"/&quot;/gi;
+ return $s;
+}
+
+sub html_unquote($) {
+ my ($s) = @_;
+  $s =~ s/(&([a-z]+);)/{ $entity_table{$2} || $1; }/gexi;   # e.g., &apos;
+  $s =~ s/(&\#(\d+);)/{ chr($2) }/gexi;                     # e.g., &#39;
+ return $s;
+}
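+
+# For example: html_quote ("<A> & B") yields "&lt;A&gt; &amp; B", and
+# html_unquote ("&lt;A&gt; &#38; B") yields back "<A> & B".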
+
# Loads the given URL (a search on some search engine) and returns:
# - the total number of hits the search engine claimed it had;
# Note that this list contains all kinds of internal search engine
# junk URLs too -- caller must prune them.
#
-sub pick_from_search_engine {
+sub pick_from_search_engine($$$) {
my ( $timeout, $search_url, $words ) = @_;
$_ = $words;
$search_count = $1;
} elsif ($body =~ m@found about ((\d{1,3})(,\d{3})*|\d+) results@) {
$search_count = $1;
- } elsif ($body =~ m@\b\d+ - \d+ of (\d+)\b@i) { # imagevista
+ } elsif ($body =~ m@\b\d+ - \d+ of (\d+)\b@i) { # avimages
$search_count = $1;
- } elsif ($body =~ m@About ((\d{1,3})(,\d{3})*) images@i) { # imagevista
+ } elsif ($body =~ m@About ((\d{1,3})(,\d{3})*) images@i) { # avimages
$search_count = $1;
} elsif ($body =~ m@We found ((\d{1,3})(,\d{3})*|\d+) results@i) { # *vista
$search_count = $1;
$search_count = $1; # lycos
} elsif ($body =~ m@WEB.*?RESULTS.*?\b((\d{1,3})(,\d{3})*)\b.*?Matches@i) {
$search_count = $1; # hotbot
- } elsif ($body =~ m@no photos were found containing@i) { # imagevista
+ } elsif ($body =~ m@no photos were found containing@i) { # avimages
$search_count = "0";
- } elsif ($body =~ m@found no document matching@i) { # altavista
+ } elsif ($body =~ m@found no document matching@i) { # avtext
$search_count = "0";
}
1 while ($search_count =~ s/^(\d+)(\d{3})/$1,$2/);
# if ($search_count eq "?" || $search_count eq "0") {
-# local *OUT;
# my $file = "/tmp/wc.html";
-# open(OUT, ">$file") || error ("writing $file: $!");
-# print OUT $body;
-# close OUT;
+# open (my $out, '>', $file) || error ("writing $file: $!");
+# print $out $body;
+# close $out;
# print STDERR blurb() . "###### wrote $file\n";
# }
}
-sub depoison {
+sub depoison(@) {
my (@urls) = @_;
my @urls2 = ();
foreach (@urls) {
# given a list of URLs, picks one at random; loads it; and returns a
# random image from it.
-# returns the url of the page loaded; the url of the image chosen;
-# and a debugging description string.
+# returns the url of the page loaded; the url of the image chosen.
#
-sub pick_image_from_pages {
+sub pick_image_from_pages($$$$@) {
my ($base, $total_hit_count, $unfiltered_link_count, $timeout, @pages) = @_;
$total_hit_count = "?" unless defined($total_hit_count);
############################################################################
# yahoorand
-my $yahoo_random_link = "http://random.yahoo.com/bin/ryl";
+my $yahoo_random_link = "http://random.yahoo.com/fast/ryl";
# Picks a random page; picks a random image on that page;
# returns two URLs: the page containing the image, and the image.
# Returns () if nothing found this time.
#
-sub pick_from_yahoo_random_link {
- my ( $timeout ) = @_;
+sub pick_from_yahoo_random_link($) {
+ my ($timeout) = @_;
print STDERR "\n\n" if ($verbose_load);
LOG ($verbose_load, "URL: $yahoo_random_link");
}
}
+\f
+############################################################################
+#
+# Pick images from random pages returned by the Alta Vista Random Link
+# Note: this seems to have gotten a *lot* less random lately (2007).
+#
+############################################################################
+
+# altavista
+my $alta_vista_random_link = "http://www.altavista.com/image/randomlink";
+
+
+# Picks a random page; picks a random image on that page;
+# returns two URLs: the page containing the image, and the image.
+# Returns () if nothing found this time.
+#
+sub pick_from_alta_vista_random_link($) {
+ my ($timeout) = @_;
+
+ print STDERR "\n\n" if ($verbose_load);
+ LOG ($verbose_load, "URL: $alta_vista_random_link");
+
+ $last_search = $alta_vista_random_link; # for warnings
+
+ $suppress_audit = 1;
+
+ my ( $base, $body ) = get_document ($alta_vista_random_link,
+ undef, $timeout);
+ if (!$base || !$body) {
+ $body = undef;
+ return;
+ }
+
+ LOG ($verbose_load, "redirected to: $base");
+
+ my $img = pick_image_from_body ($base, $body);
+ $body = undef;
+
+ if ($img) {
+ return ($base, $img);
+ } else {
+ return ();
+ }
+}
+
\f
############################################################################
#
############################################################################
-my $alta_vista_images_url = "http://www.altavista.com/cgi-bin/query" .
+my $alta_vista_images_url = "http://www.altavista.com/image/results" .
"?ipht=1" . # photos
"&igrph=1" . # graphics
"&iclr=1" . # color
"&ibw=1" . # b&w
"&micat=1" . # no partner sites
- "&imgset=1" . # no partner sites
- "&stype=simage" . # do image search
- "&mmW=1" . # unknown, but required
+ "&sc=on" . # "site collapse"
"&q=";
-# imagevista
-sub pick_from_alta_vista_images {
- my ( $timeout ) = @_;
+# avimages
+sub pick_from_alta_vista_images($) {
+ my ($timeout) = @_;
- my $words = random_words(1);
+ my $words = random_word();
my $page = (int(rand(9)) + 1);
my $search_url = $alta_vista_images_url . $words;
my @candidates = ();
foreach my $u (@subpages) {
- # altavista is encoding their URLs now.
- next unless ($u =~
- m@^/r\?ck_sm=[a-zA-Z0-9]+\&ref=[a-zA-Z0-9]+(\&uid=[a-zA-Z0-9]+)?\&r=(.*)@);
- $u = url_unquote($2);
+ # avimages is encoding their URLs now.
+ next unless ($u =~ s/^.*\*\*(http%3a.*$)/$1/gsi);
+ $u = url_unquote($u);
next unless ($u =~ m@^http://@i); # skip non-HTTP or relative URLs
next if ($u =~ m@[/.]altavista\.com\b@i); # skip altavista builtins
+ next if ($u =~ m@[/.]yahoo\.com\b@i); # yahoo and av in cahoots?
next if ($u =~ m@[/.]doubleclick\.net\b@i); # you cretins
next if ($u =~ m@[/.]clicktomarket\.com\b@i); # more cretins
\f
############################################################################
#
-# Pick images by feeding random words into Google Image Search
+# Pick images from Aptix security cameras
+# Cribbed liberally from google image search code.
+# By Jason Sullivan <jasonsul@us.ibm.com>
+#
+############################################################################
+
+my $aptix_images_url = ("http://www.google.com/search" .
+ "?q=inurl:%22jpg/image.jpg%3Fr%3D%22");
+
+# securitycam
+sub pick_from_security_camera($) {
+ my ($timeout) = @_;
+
+ my $page = (int(rand(9)) + 1);
+ my $num = 20; # 20 images per page
+ my $search_url = $aptix_images_url;
+
+ if ($page > 1) {
+ $search_url .= "&start=" . $page*$num; # page number
+ $search_url .= "&num=" . $num; #images per page
+ }
+
+ my ($search_hit_count, @subpages) =
+ pick_from_search_engine ($timeout, $search_url, '');
+
+ my @candidates = ();
+ my %referers;
+ foreach my $u (@subpages) {
+ next if ($u =~ m@[/.]google\.com\b@i); # skip google builtins (most links)
+ next unless ($u =~ m@jpg/image.jpg\?r=@i); # All pics contain this
+
+ LOG ($verbose_filter, " candidate: $u");
+ push @candidates, $u;
+ $referers{$u} = $u;
+ }
+
+ @candidates = depoison (@candidates);
+ return () if ($#candidates < 0);
+ my $i = int(rand($#candidates+1));
+ my $img = $candidates[$i];
+ my $ref = $referers{$img};
+
+ LOG ($verbose_load, "picked image " . ($i+1) . ": $img (on $ref)");
+ return ($ref, $img);
+}
+
+\f
+############################################################################
+#
+# Pick images by feeding random words into Google Image Search.
# By Charles Gales <gales@us.ibm.com>
#
############################################################################
"&q=";
# googleimgs
-sub pick_from_google_images {
- my ( $timeout ) = @_;
+sub pick_from_google_images($;$$) {
+ my ($timeout, $words, $max_page) = @_;
+
+ if (!defined($words)) {
+ $words = random_word(); # only one word for Google
+ }
- my $words = random_word; # only one word for Google
my $page = (int(rand(9)) + 1);
my $num = 20; # 20 images per page
my $search_url = $google_images_url . $words;
pick_from_search_engine ($timeout, $search_url, $words);
my @candidates = ();
+ my %referers;
foreach my $u (@subpages) {
next unless ($u =~ m@imgres\?imgurl@i); # All pics start with this
next if ($u =~ m@[/.]google\.com\b@i); # skip google builtins
- if ($u =~ m@^/imgres\?imgurl=(.*?)\&imgrefurl=(.*?)\&@) {
- my $urlf = $2;
- LOG ($verbose_filter, " candidate: $urlf");
- push @candidates, $urlf;
+ $u = html_unquote($u);
+ if ($u =~ m@^/imgres\?imgurl=(.*?)&imgrefurl=(.*?)\&@) {
+ my $ref = $2;
+ my $img = $1;
+ $img = "http://$img" unless ($img =~ m/^http:/i);
+
+ $ref = url_decode($ref);
+ $img = url_decode($img);
+
+ LOG ($verbose_filter, " candidate: $ref");
+ push @candidates, $img;
+ $referers{$img} = $ref;
}
}
+ @candidates = depoison (@candidates);
+ return () if ($#candidates < 0);
+ my $i = int(rand($#candidates+1));
+ my $img = $candidates[$i];
+ my $ref = $referers{$img};
+
+ LOG ($verbose_load, "picked image " . ($i+1) . ": $img (on $ref)");
+ return ($ref, $img);
+}
+
+
+\f
+############################################################################
+#
+# Pick images by feeding random numbers into Google Image Search.
+# By jwz, suggested by Ian O'Donnell.
+#
+############################################################################
+
+
+# googlenums
+sub pick_from_google_image_numbers($) {
+ my ($timeout) = @_;
+
+ my $max = 9999;
+ my $number = int(rand($max));
+
+ $number = sprintf("%04d", $number)
+ if (rand() < 0.3);
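+  # (i.e., about 30% of the time, search for a zero-padded form like "0042".)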
+
+ pick_from_google_images ($timeout, "$number");
+}
+
+
+\f
+############################################################################
+#
+# Pick images by feeding random digital camera file names into
+# Google Image Search.
+# By jwz, inspired by the excellent Random Personal Picture Finder
+# at http://www.diddly.com/random/
+#
+############################################################################
+
+my @photomakers = (
+ #
+ # Common digital camera file name formats, as described at
+ # http://www.diddly.com/random/about.html
+ #
+ sub { sprintf ("dcp%05d.jpg", int(rand(4000))); }, # Kodak
+ sub { sprintf ("dsc%05d.jpg", int(rand(4000))); }, # Nikon
+ sub { sprintf ("dscn%04d.jpg", int(rand(4000))); }, # Nikon
+ sub { sprintf ("mvc-%03d.jpg", int(rand(999))); }, # Sony Mavica
+ sub { sprintf ("mvc%05d.jpg", int(rand(9999))); }, # Sony Mavica
+ sub { sprintf ("P101%04d.jpg", int(rand(9999))); }, # Olympus w/ date=101
+ sub { sprintf ("P%x%02d%04d.jpg", # Olympus
+ int(rand(0xC)), int(rand(30))+1,
+ rand(9999)); },
+ sub { sprintf ("IMG_%03d.jpg", int(rand(999))); }, # ?
+ sub { sprintf ("IMAG%04d.jpg", int(rand(9999))); }, # RCA and Samsung
+ sub { my $n = int(rand(9999)); # Canon
+ sprintf ("1%02d-%04d.jpg", int($n/100), $n); },
+ sub { my $n = int(rand(9999)); # Canon
+ sprintf ("1%02d-%04d_IMG.jpg",
+ int($n/100), $n); },
+ sub { sprintf ("IMG_%04d.jpg", int(rand(9999))); }, # Canon
+ sub { sprintf ("dscf%04d.jpg", int(rand(9999))); }, # Fuji Finepix
+ sub { sprintf ("pdrm%04d.jpg", int(rand(9999))); }, # Toshiba PDR
+ sub { sprintf ("IM%06d.jpg", int(rand(9999))); }, # HP Photosmart
+ sub { sprintf ("EX%06d.jpg", int(rand(9999))); }, # HP Photosmart
+# sub { my $n = int(rand(3)); # Kodak DC-40,50,120
+# sprintf ("DC%04d%s.jpg", int(rand(9999)),
+# $n == 0 ? 'S' : $n == 1 ? 'M' : 'L'); },
+ sub { sprintf ("pict%04d.jpg", int(rand(9999))); }, # Minolta Dimage
+ sub { sprintf ("P%07d.jpg", int(rand(9999))); }, # Kodak DC290
+# sub { sprintf ("%02d%02d%04d.jpg", # Casio QV3000, QV4000
+# int(rand(12))+1, int(rand(31))+1,
+# int(rand(999))); },
+# sub { sprintf ("%02d%x%02d%04d.jpg", # Casio QV7000
+# int(rand(6)), # year
+# int(rand(12))+1, int(rand(31))+1,
+# int(rand(999))); },
+ sub { sprintf ("IMGP%04d.jpg", int(rand(9999))); }, # Pentax Optio S
+ sub { sprintf ("PANA%04d.jpg", int(rand(9999))); }, # Panasonic vid still
+ sub { sprintf ("HPIM%04d.jpg", int(rand(9999))); }, # HP Photosmart
+ sub { sprintf ("PCDV%04d.jpg", int(rand(9999))); }, # ?
+ );
+
+
+# googlephotos
+sub pick_from_google_image_photos($) {
+ my ($timeout) = @_;
+
+ my $i = int(rand($#photomakers + 1));
+ my $fn = $photomakers[$i];
+ my $file = &$fn;
+  my $words = $file . "%20filetype:jpg";
+
+ pick_from_google_images ($timeout, $words);
+}
+
+
+\f
+############################################################################
+#
+# Pick images by feeding random words into Alta Vista Text Search
+#
+############################################################################
+
+
+my $alta_vista_url = "http://www.altavista.com/web/results" .
+ "?pg=aq" .
+ "&aqmode=s" .
+ "&filetype=html" .
+ "&sc=on" . # "site collapse"
+ "&nbq=50" .
+ "&aqo=";
+
+# avtext
+sub pick_from_alta_vista_text($) {
+ my ($timeout) = @_;
+
+ my $words = random_words('%20');
+ my $page = (int(rand(9)) + 1);
+ my $search_url = $alta_vista_url . $words;
+
+ if ($page > 1) {
+ $search_url .= "&pgno=" . $page;
+ $search_url .= "&stq=" . (($page-1) * 10);
+ }
+
+ my ($search_hit_count, @subpages) =
+ pick_from_search_engine ($timeout, $search_url, $words);
+
+ my @candidates = ();
+ foreach my $u (@subpages) {
+
+ # Those altavista fuckers are playing really nasty redirection games
+    # these days: they filter your clicks through their site, but use
+ # onMouseOver to make it look like they're not! Well, it makes it
+ # easier for us to identify search results...
+ #
+ next unless ($u =~ s/^.*\*\*(http%3a.*$)/$1/gsi);
+ $u = url_unquote($u);
+
+ next unless ($u =~ m@^http://@i); # skip non-HTTP or relative URLs
+ next if ($u =~ m@[/.]altavista\.com\b@i); # skip altavista builtins
+ next if ($u =~ m@[/.]yahoo\.com\b@i); # yahoo and av in cahoots?
+
+ LOG ($verbose_filter, " candidate: $u");
+ push @candidates, $u;
+ }
+
return pick_image_from_pages ($search_url, $search_hit_count, $#subpages+1,
$timeout, @candidates);
}
\f
############################################################################
#
-# Pick images by feeding random words into Alta Vista Text Search
+# Pick images by feeding random words into Hotbot
+#
+############################################################################
+
+my $hotbot_search_url =("http://hotbot.lycos.com/default.asp" .
+ "?ca=w" .
+ "&descriptiontype=0" .
+ "&imagetoggle=1" .
+ "&matchmode=any" .
+ "&nummod=2" .
+ "&recordcount=50" .
+ "&sitegroup=1" .
+ "&stem=1" .
+ "&cobrand=undefined" .
+ "&query=");
+
+sub pick_from_hotbot_text($) {
+ my ($timeout) = @_;
+
+ $last_search = $hotbot_search_url; # for warnings
+
+ # lycos seems to always give us back dictionaries and word lists if
+ # we search for more than one word...
+ #
+ my $words = random_word();
+
+ my $start = int(rand(8)) * 10 + 1;
+ my $search_url = $hotbot_search_url . $words . "&first=$start&page=more";
+
+ my ($search_hit_count, @subpages) =
+ pick_from_search_engine ($timeout, $search_url, $words);
+
+ my @candidates = ();
+ foreach my $u (@subpages) {
+
+ # Hotbot plays redirection games too
+ # (not any more?)
+# next unless ($u =~ m@/director.asp\?.*\btarget=([^&]+)@);
+# $u = url_decode($1);
+
+ next unless ($u =~ m@^http://@i); # skip non-HTTP or relative URLs
+ next if ($u =~ m@[/.]hotbot\.com\b@i); # skip hotbot builtins
+ next if ($u =~ m@[/.]lycos\.com\b@i); # skip hotbot builtins
+ next if ($u =~ m@[/.]inktomi\.com\b@i); # skip hotbot builtins
+
+ LOG ($verbose_filter, " candidate: $u");
+ push @candidates, $u;
+ }
+
+ return pick_image_from_pages ($search_url, $search_hit_count, $#subpages+1,
+ $timeout, @candidates);
+}
+
+
+\f
+############################################################################
+#
+# Pick images by feeding random words into Lycos
+#
+############################################################################
+
+my $lycos_search_url = "http://search.lycos.com/default.asp" .
+ "?lpv=1" .
+ "&loc=searchhp" .
+ "&tab=web" .
+ "&query=";
+
+sub pick_from_lycos_text($) {
+ my ($timeout) = @_;
+
+ $last_search = $lycos_search_url; # for warnings
+
+ # lycos seems to always give us back dictionaries and word lists if
+ # we search for more than one word...
+ #
+ my $words = random_word();
+
+ my $start = int(rand(8)) * 10 + 1;
+ my $search_url = $lycos_search_url . $words . "&first=$start&page=more";
+
+ my ($search_hit_count, @subpages) =
+ pick_from_search_engine ($timeout, $search_url, $words);
+
+ my @candidates = ();
+ foreach my $u (@subpages) {
+
+ # Lycos plays redirection games.
+ # (not any more?)
+# next unless ($u =~ m@^http://click.lycos.com/director.asp
+# .*
+# \btarget=([^&]+)
+# .*
+# @x);
+# $u = url_decode($1);
+
+ next unless ($u =~ m@^http://@i); # skip non-HTTP or relative URLs
+ next if ($u =~ m@[/.]hotbot\.com\b@i); # skip lycos builtins
+ next if ($u =~ m@[/.]lycos\.com\b@i); # skip lycos builtins
+ next if ($u =~ m@[/.]terralycos\.com\b@i); # skip lycos builtins
+ next if ($u =~ m@[/.]inktomi\.com\b@i); # skip lycos builtins
+
+
+ LOG ($verbose_filter, " candidate: $u");
+ push @candidates, $u;
+ }
+
+ return pick_image_from_pages ($search_url, $search_hit_count, $#subpages+1,
+ $timeout, @candidates);
+}
+
+
+\f
+############################################################################
+#
+# Pick images by feeding random words into news.yahoo.com
+#
+############################################################################
+
+my $yahoo_news_url = "http://news.search.yahoo.com/search/news" .
+ "?c=news_photos" .
+ "&p=";
+
+# yahoonews
+sub pick_from_yahoo_news_text($) {
+ my ($timeout) = @_;
+
+ $last_search = $yahoo_news_url; # for warnings
+
+ my $words = random_word();
+ my $search_url = $yahoo_news_url . $words;
+
+ my ($search_hit_count, @subpages) =
+ pick_from_search_engine ($timeout, $search_url, $words);
+
+ my @candidates = ();
+ foreach my $u (@subpages) {
+
+ # de-redirectize the URLs
+ $u =~ s@^http://rds\.yahoo\.com/.*-http%3A@http:@s;
+
+ # only accept URLs on Yahoo's news site
+ next unless ($u =~ m@^http://dailynews\.yahoo\.com/@i ||
+ $u =~ m@^http://story\.news\.yahoo\.com/@i);
+ next unless ($u =~ m@&u=/@);
+
+ LOG ($verbose_filter, " candidate: $u");
+ push @candidates, $u;
+ }
+
+ return pick_image_from_pages ($search_url, $search_hit_count, $#subpages+1,
+ $timeout, @candidates);
+}
+
+
+\f
+############################################################################
+#
+# Pick images from LiveJournal's list of recently-posted images.
+#
+############################################################################
+
+my $livejournal_img_url = "http://www.livejournal.com/stats/latest-img.bml";
+
+# With most of our image sources, we get a random page and then select
+# from the images on it. However, in the case of LiveJournal, the page
+# of images tends to update slowly; so we'll remember the last N entries
+# on it and randomly select from those, to get a wider variety each time.
+
+my $lj_cache_size = 1000;
+my @lj_cache = (); # fifo, for ordering by age
+my %lj_cache = (); # hash, for detecting dups
+
+# livejournal
+sub pick_from_livejournal_images($) {
+ my ($timeout) = @_;
+
+ $last_search = $livejournal_img_url; # for warnings
+
+ my ( $base, $body ) = get_document ($livejournal_img_url, undef, $timeout);
+ return () unless $body;
+
+ $body =~ s/\n/ /gs;
+ $body =~ s/(<recent-image)\b/\n$1/gsi;
+
+ foreach (split (/\n/, $body)) {
+ next unless (m/^<recent-image\b/);
+ next unless (m/\bIMG=[\'\"]([^\'\"]+)[\'\"]/si);
+ my $img = html_unquote ($1);
+
+ next if ($lj_cache{$img}); # already have it
+
+ next unless (m/\bURL=[\'\"]([^\'\"]+)[\'\"]/si);
+ my $page = html_unquote ($1);
+ my @pair = ($img, $page);
+ LOG ($verbose_filter, " candidate: $img");
+ push @lj_cache, \@pair;
+ $lj_cache{$img} = \@pair;
+ }
+
+ return () if ($#lj_cache == -1);
+
+ my $n = $#lj_cache+1;
+ my $i = int(rand($n));
+ my ($img, $page) = @{$lj_cache[$i]};
+
+ # delete this one from @lj_cache and from %lj_cache.
+ #
+ @lj_cache = ( @lj_cache[0 .. $i-1],
+ @lj_cache[$i+1 .. $#lj_cache] );
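+  # (equivalent to splice (@lj_cache, $i, 1).)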
+ delete $lj_cache{$img};
+
+ # Keep the size of the cache under the limit by nuking older entries
+ #
+ while ($#lj_cache >= $lj_cache_size) {
+ my $pairP = shift @lj_cache;
+ my $img = $pairP->[0];
+ delete $lj_cache{$img};
+ }
+
+ LOG ($verbose_load, "picked image " .($i+1) . "/$n: $img");
+
+ return ($page, $img);
+}
+
+\f
+############################################################################
+#
+# Pick images from ircimages.com (images that have been in the /topic of
+# various IRC channels.)
+#
+############################################################################
+
+my $ircimages_url = "http://ircimages.com/";
+
+# ircimages
+sub pick_from_ircimages($) {
+ my ($timeout) = @_;
+
+ $last_search = $ircimages_url; # for warnings
+
+ my $n = int(rand(2900));
+ my $search_url = $ircimages_url . "page-$n";
+
+ my ( $base, $body ) = get_document ($search_url, undef, $timeout);
+ return () unless $body;
+
+ my @candidates = ();
+
+ $body =~ s/\n/ /gs;
+ $body =~ s/(<A)\b/\n$1/gsi;
+
+ foreach (split (/\n/, $body)) {
+
+ my ($u) = m@<A\s.*\bHREF\s*=\s*([^>]+)>@i;
+ next unless $u;
+
+ if ($u =~ m/^\"([^\"]*)\"/) { $u = $1; } # quoted string
+ elsif ($u =~ m/^([^\s]*)\s/) { $u = $1; } # or token
+
+ next unless ($u =~ m/^http:/i);
+    next if ($u =~ m@^http://(searchirc\.com|ircimages\.com)@i);
+ next unless ($u =~ m@[.](gif|jpg|jpeg|pjpg|pjpeg|png)$@i);
+
+ LOG ($verbose_http, " HREF: $u");
+ push @candidates, $u;
+ }
+
+ LOG ($verbose_filter, "" . $#candidates+1 . " links on $search_url");
+
+ return () if ($#candidates == -1);
+
+ my $i = int(rand($#candidates+1));
+ my $img = $candidates[$i];
+
+ LOG ($verbose_load, "picked image " .($i+1) . "/" . ($#candidates+1) .
+ ": $img");
+
+ $search_url = $img; # hmm...
+ return ($search_url, $img);
+}
+
+\f
+############################################################################
+#
+# Pick images from Twitpic's list of recently-posted images.
+#
+############################################################################
+
+my $twitpic_img_url = "http://twitpic.com/public_timeline/feed.rss";
+
+# With most of our image sources, we get a random page and then select
+# from the images on it. However, in the case of Twitpic, the page
+# of images tends to update slowly; so we'll remember the last N entries
+# on it and randomly select from those, to get a wider variety each time.
+
+my $twitpic_cache_size = 1000;
+my @twitpic_cache = (); # fifo, for ordering by age
+my %twitpic_cache = (); # hash, for detecting dups
+
+# twitpic
+sub pick_from_twitpic_images($) {
+ my ($timeout) = @_;
+
+ $last_search = $twitpic_img_url; # for warnings
+
+ my ( $base, $body ) = get_document ($twitpic_img_url, undef, $timeout);
+
+ # Update the cache.
+
+ if ($body) {
+ $body =~ s/\n/ /gs;
+ $body =~ s/(<item)\b/\n$1/gsi;
+
+ my @items = split (/\n/, $body);
+ shift @items;
+ foreach (@items) {
+ next unless (m@<link>([^<>]*)</link>@si);
+ my $page = html_unquote ($1);
+
+ $page =~ s@/$@@s;
+ $page .= '/full';
+
+ next if ($twitpic_cache{$page}); # already have it
+
+ LOG ($verbose_filter, " candidate: $page");
+ push @twitpic_cache, $page;
+ $twitpic_cache{$page} = $page;
+ }
+ }
+
+ # Pull from the cache.
+
+ return () if ($#twitpic_cache == -1);
+
+ my $n = $#twitpic_cache+1;
+ my $i = int(rand($n));
+ my $page = $twitpic_cache[$i];
+
+ # delete this one from @twitpic_cache and from %twitpic_cache.
+ #
+ @twitpic_cache = ( @twitpic_cache[0 .. $i-1],
+ @twitpic_cache[$i+1 .. $#twitpic_cache] );
+ delete $twitpic_cache{$page};
+
+ # Keep the size of the cache under the limit by nuking older entries
+ #
+ while ($#twitpic_cache >= $twitpic_cache_size) {
+ my $page = shift @twitpic_cache;
+ delete $twitpic_cache{$page};
+ }
+
+ ( $base, $body ) = get_document ($page, undef, $timeout);
+ my $img = undef;
+ $body = '' unless defined($body);
+
+ foreach (split (/<img\s+/, $body)) {
+ my ($src) = m/\bsrc=[\"\'](.*?)[\"\']/si;
+ next unless $src;
+ next if m@/js/@s;
+ next if m@/images/@s;
+
+ $img = $src;
+
+ $img = "http:$img" if ($img =~ m@^//@s); # Oh come on
+
+ # Sometimes these images are hosted on twitpic, sometimes on Amazon.
+ if ($img =~ m@^/@) {
+ $base =~ s@^(https?://[^/]+)/.*@$1@s;
+ $img = $base . $img;
+ }
+ last;
+ }
+
+ if (!$img) {
+ LOG ($verbose_load, "no matching images on $page\n");
+ return ();
+ }
+
+ LOG ($verbose_load, "picked image " .($i+1) . "/$n: $img");
+
+ return ($page, $img);
+}
+
+\f
+############################################################################
+#
+# Pick images from Twitter's list of recently-posted updates.
+#
+############################################################################
+
+# With most of our image sources, we get a random page and then select
+# from the images on it. However, in the case of Twitter, the page
+# of images only updates once a minute; so we'll remember the last N entries
+# on it and randomly select from those, to get a wider variety each time.
+
+my $twitter_img_url = "http://api.twitter.com/1/statuses/" .
+ "public_timeline.json" .
+ "?include_entities=true" .
+ "&include_rts=true" .
+ "&count=200";
+
+my $twitter_cache_size = 1000;
+
+my @twitter_cache = (); # fifo, for ordering by age
+my %twitter_cache = (); # hash, for detecting dups
+
+
+# twitter
+sub pick_from_twitter_images($) {
+ my ($timeout) = @_;
+
+ $last_search = $twitter_img_url; # for warnings
+
+ my ( $base, $body ) = get_document ($twitter_img_url, undef, $timeout);
+ # Update the cache.
+
+ if ($body) {
+ $body =~ s/[\r\n]+/ /gs;
+
+ # Parsing JSON is a pain in the ass. So we halfass it as usual.
+ $body =~ s/^\[|\]$//s;
+ $body =~ s/(\[.*?\])/{ $_ = $1; s@\},@\} @gs; $_; }/gsexi;
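+    # (Drop the leading "[", then rewrite "}," to "} " inside any nested
+    # [...] arrays, so the split below only breaks between status objects.)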
+ my @items = split (/},{/, $body);
+ foreach (@items) {
+ my ($name) = m@"screen_name":"([^\"]+)"@si;
+ my ($img) = m@"media_url":"([^\"]+)"@si;
+ my ($page) = m@"display_url":"([^\"]+)"@si;
+ next unless ($name && $img && $page);
+ foreach ($img, $page) {
+ s/\\//gs;
+ $_ = "http://$_" unless (m/^http/si);
+ }
+
+ next if ($twitter_cache{$page}); # already have it
+
+ LOG ($verbose_filter, " candidate: $page - $img");
+ push @twitter_cache, $page;
+ $twitter_cache{$page} = $img;
+ }
+ }
+
+ # Pull from the cache.
+
+ return () if ($#twitter_cache == -1);
+
+ my $n = $#twitter_cache+1;
+ my $i = int(rand($n));
+ my $page = $twitter_cache[$i];
+ my $url = $twitter_cache{$page};
+
+ # delete this one from @twitter_cache and from %twitter_cache.
+ #
+ @twitter_cache = ( @twitter_cache[0 .. $i-1],
+ @twitter_cache[$i+1 .. $#twitter_cache] );
+ delete $twitter_cache{$page};
+
+ # Keep the size of the cache under the limit by nuking older entries
+ #
+ while ($#twitter_cache >= $twitter_cache_size) {
+ my $page = shift @twitter_cache;
+ delete $twitter_cache{$page};
+ }
+
+ LOG ($verbose_load, "picked page $url");
+
+ $suppress_audit = 1;
+
+ return ($page, $url);
+}
+
+\f
+############################################################################
+#
+# Pick images from Flickr's page of recently-posted photos.
#
############################################################################
+my $flickr_img_url = "http://www.flickr.com/photos/";
-my $alta_vista_url_1 = "http://www.altavista.com/cgi-bin/query?pg=q" .
- "&text=yes&kl=XX&stype=stext&q=";
-my $alta_vista_url_2 = "http://www.altavista.com/sites/search/web?pg=q" .
- "&kl=XX&search=Search&q=";
+# Like LiveJournal, the Flickr page of images tends to update slowly,
+# so remember the last N entries on it and randomly select from those.
-my $alta_vista_url = $alta_vista_url_2;
+# I know that Flickr has an API (http://www.flickr.com/services/api/)
+# but it was easy enough to scrape the HTML, so I didn't bother exploring.
-# altavista
-sub pick_from_alta_vista_text {
- my ( $timeout ) = @_;
+my $flickr_cache_size = 1000;
+my @flickr_cache = (); # fifo, for ordering by age
+my %flickr_cache = (); # hash, for detecting dups
- my $words = random_words(1);
- my $page = (int(rand(9)) + 1);
- my $search_url = $alta_vista_url . $words;
- if ($page > 1) {
- $search_url .= "&pgno=" . $page;
- $search_url .= "&stq=" . (($page-1) * 10);
+# flickr_recent
+sub pick_from_flickr_recent($) {
+ my ($timeout) = @_;
+
+ my $start = 16 * int(rand(100));
+
+ $last_search = $flickr_img_url; # for warnings
+ $last_search .= "?start=$start" if ($start > 0);
+
+ my ( $base, $body ) = get_document ($last_search, undef, $timeout);
+ return () unless $body;
+
+ $body =~ s/[\r\n]/ /gs;
+ $body =~ s/(<a)\b/\n$1/gsi;
+
+ my $count = 0;
+ my $count2 = 0;
+ foreach (split (/\n/, $body)) {
+ my ($page, $thumb) = m@<A \s [^<>]* \b HREF=\"([^<>\"]+)\" [^<>]* > \s*
+ <IMG \s [^<>]* \b SRC=\"([^<>\"]+)\" @xsi;
+ next unless defined ($thumb);
+ $page = html_unquote ($page);
+ $thumb = html_unquote ($thumb);
+
+ next unless ($thumb =~ m@^http://farm\d*\.static\.?flickr\.com/@);
+
+ my $base = "http://www.flickr.com/";
+ $page =~ s@^/@$base@;
+ $thumb =~ s@^/@$base@;
+
+ my $img = $thumb;
+ $img =~ s/_[a-z](\.[a-z\d]+)$/$1/si; # take off "thumb" suffix
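+    # (e.g. ".../12345_6789abcd_m.jpg" => ".../12345_6789abcd.jpg")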
+
+ $count++;
+ next if ($flickr_cache{$img}); # already have it
+
+ my @pair = ($img, $page, $start);
+ LOG ($verbose_filter, " candidate: $img");
+ push @flickr_cache, \@pair;
+ $flickr_cache{$img} = \@pair;
+ $count2++;
}
- my ($search_hit_count, @subpages) =
- pick_from_search_engine ($timeout, $search_url, $words);
+ return () if ($#flickr_cache == -1);
- my @candidates = ();
- foreach my $u (@subpages) {
+ my $n = $#flickr_cache+1;
+ my $i = int(rand($n));
+ my ($img, $page) = @{$flickr_cache[$i]};
- # Those altavista fuckers are playing really nasty redirection games
- # these days: the filter your clicks through their site, but use
- # onMouseOver to make it look like they're not! Well, it makes it
- # easier for us to identify search results...
- #
- next unless ($u =~
- m@^/r\?ck_sm=[a-zA-Z0-9]+\&ref=[a-zA-Z0-9]+\&uid=[a-zA-Z0-9]+\&r=(.*)@);
- $u = url_unquote($1);
+ # delete this one from @flickr_cache and from %flickr_cache.
+ #
+ @flickr_cache = ( @flickr_cache[0 .. $i-1],
+ @flickr_cache[$i+1 .. $#flickr_cache] );
+ delete $flickr_cache{$img};
- LOG ($verbose_filter, " candidate: $u");
- push @candidates, $u;
+ # Keep the size of the cache under the limit by nuking older entries
+ #
+ while ($#flickr_cache >= $flickr_cache_size) {
+ my $pairP = shift @flickr_cache;
+ my $img = $pairP->[0];
+ delete $flickr_cache{$img};
}
- return pick_image_from_pages ($search_url, $search_hit_count, $#subpages+1,
- $timeout, @candidates);
-}
+ LOG ($verbose_load, "picked image " .($i+1) . "/$n: $img");
+ return ($page, $img);
+}
\f
############################################################################
#
-# Pick images by feeding random words into Hotbot
+# Pick images from a random RSS feed on Flickr.
#
############################################################################
-my $hotbot_search_url = "http://hotbot.lycos.com/" .
- "?SM=SC" .
- "&DV=0" .
- "&LG=any" .
- "&FVI=1" .
- "&DC=100" .
- "&DE=0" .
- "&SQ=1" .
- "&TR=13" .
- "&AM1=MC" .
- "&MT=";
+my $flickr_rss_base = ("http://www.flickr.com/services/feeds/photos_public.gne".
+ "?format=rss_200_enc&tagmode=any&tags=");
-sub pick_from_hotbot_text {
- my ( $timeout ) = @_;
+# Picks a random RSS feed; picks a random image from that feed;
+# returns 2 URLs: the page containing the image, and the image.
+# Mostly by Joe Mcmahon <mcmahon@yahoo-inc.com>
+#
+# flickr_random
+sub pick_from_flickr_random($) {
+ my $timeout = shift;
- my $words = random_words(0);
- my $search_url = $hotbot_search_url . $words;
+ my $words = random_words(',');
+ my $rss = $flickr_rss_base . $words;
+ $last_search = $rss;
- my ($search_hit_count, @subpages) =
- pick_from_search_engine ($timeout, $search_url, $words);
+ $_ = $words;
+ s/,/ /g;
- my @candidates = ();
- foreach my $u (@subpages) {
+ print STDERR "\n\n" if ($verbose_load);
+ LOG ($verbose_load, "words: $_");
+ LOG ($verbose_load, "URL: $last_search");
- # Hotbot plays redirection games too
- next unless ($u =~ m@^/director.asp\?target=([^&]+)@);
- $u = url_decode($1);
+ $suppress_audit = 1;
- LOG ($verbose_filter, " candidate: $u");
- push @candidates, $u;
+ my ( $base, $body ) = get_document ($last_search, undef, $timeout);
+ if (!$base || !$body) {
+ $body = undef;
+ return;
}
- return pick_image_from_pages ($search_url, $search_hit_count, $#subpages+1,
- $timeout, @candidates);
-}
+ my $img;
+ ($base, $img) = pick_image_from_rss ($base, $body);
+ $body = undef;
+ return () unless defined ($img);
+ LOG ($verbose_load, "redirected to: $base");
+ return ($base, $img);
+}
\f
############################################################################
#
-# Pick images by feeding random words into Lycos
+# Pick images by waiting for driftnet to populate a temp dir with files.
+# Requires driftnet version 0.1.5 or later.
+# (Driftnet is a program by Chris Lightfoot that sniffs your local ethernet
+# for images being downloaded by others.)
+# Driftnet/webcollage integration by jwz.
#
############################################################################
-my $lycos_search_url = "http://lycospro.lycos.com/srchpro/" .
- "?lpv=1" .
- "&t=any" .
- "&query=";
-
-sub pick_from_lycos_text {
- my ( $timeout ) = @_;
+# driftnet
+sub pick_from_driftnet($) {
+ my ($timeout) = @_;
- my $words = random_words(0);
- my $start = int(rand(8)) * 10 + 1;
- my $search_url = $lycos_search_url . $words . "&start=$start";
+ my $id = $driftnet_magic;
+ my $dir = $driftnet_dir;
+ my $start = time;
+ my $now;
- my ($search_hit_count, @subpages) =
- pick_from_search_engine ($timeout, $search_url, $words);
+ error ("\$driftnet_dir unset?") unless ($dir);
+ $dir =~ s@/+$@@;
- my @candidates = ();
- foreach my $u (@subpages) {
+ error ("$dir unreadable") unless (-d "$dir/.");
- # Lycos plays exact the same redirection game as hotbot.
- # Note that "id=0" is used for internal advertising links,
- # and 1+ are used for search results.
- next unless ($u =~ m@^http://click.hotbot.com/director.asp\?id=[1-9]\d*&target=([^&]+)@);
- $u = url_decode($1);
+ $timeout = $http_timeout unless ($timeout);
+ $last_search = $id;
- LOG ($verbose_filter, " candidate: $u");
- push @candidates, $u;
+ while ($now = time, $now < $start + $timeout) {
+    opendir (my $dh, $dir) || error ("$dir: $!");  # keep handle distinct from the path in $dir
+    while (my $file = readdir($dh)) {
+      next if ($file =~ m/^\./);
+      $file = "$dir/$file";
+      closedir ($dh);
+      LOG ($verbose_load, "picked file $file ($id)");
+      return ($id, $file);
+    }
+    closedir ($dh);
}
-
- return pick_image_from_pages ($search_url, $search_hit_count, $#subpages+1,
- $timeout, @candidates);
+ LOG (($verbose_net || $verbose_load), "timed out for $id");
+ return ();
}
-\f
-############################################################################
-#
-# Pick images by feeding random words into news.yahoo.com
-#
-############################################################################
+sub get_driftnet_file($) {
+ my ($file) = @_;
-my $yahoo_news_url = "http://search.news.yahoo.com/search/news_photos?" .
- "&z=&n=100&o=o&2=&3=&p=";
+ error ("\$driftnet_dir unset?") unless ($driftnet_dir);
-# yahoonews
-sub pick_from_yahoo_news_text {
- my ( $timeout ) = @_;
+ my $id = $driftnet_magic;
+ error ("$id: $file not in $driftnet_dir?")
+ unless ($file =~ m@^\Q$driftnet_dir@o);
- my $words = random_words(1);
- my $search_url = $yahoo_news_url . $words;
+ open (my $in, '<', $file) || error ("$id: $file: $!");
+ my $body = '';
+ local $/ = undef; # read entire file
+ $body = <$in>;
+ close ($in) || error ("$id: $file: $!");
+ unlink ($file) || error ("$id: $file: rm: $!");
+ return ($id, $body);
+}
- my ($search_hit_count, @subpages) =
- pick_from_search_engine ($timeout, $search_url, $words);
- my @candidates = ();
- foreach my $u (@subpages) {
- # only accept URLs on Yahoo's news site
- next unless ($u =~ m@^http://dailynews\.yahoo\.com/@i ||
- $u =~ m@^http://story\.news\.yahoo\.com/@i);
+sub spawn_driftnet($) {
+ my ($cmd) = @_;
- LOG ($verbose_filter, " candidate: $u");
- push @candidates, $u;
+ # make a directory to use.
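+  # (Keep generating random directory names until mkdir succeeds, i.e.
+  # until we find a name that doesn't already exist.)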
+ while (1) {
+ my $tmp = $ENV{TEMPDIR} || "/tmp";
+ $driftnet_dir = sprintf ("$tmp/driftcollage-%08x", rand(0xffffffff));
+ LOG ($verbose_exec, "mkdir $driftnet_dir");
+ last if mkdir ($driftnet_dir, 0700);
}
- return pick_image_from_pages ($search_url, $search_hit_count, $#subpages+1,
- $timeout, @candidates);
+ if (! ($cmd =~ m/\s/)) {
+ # if the command didn't have any arguments in it, then it must be just
+ # a pointer to the executable. Append the default args to it.
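+    # ($default_driftnet_cmd is presumably "driftnet <default args>";
+    # the substitution below strips the leading program name so that only
+    # its default arguments get appended to the user's command.)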
+ my $dargs = $default_driftnet_cmd;
+ $dargs =~ s/^[^\s]+//;
+ $cmd .= $dargs;
+ }
+
+ # point the driftnet command at our newly-minted private directory.
+ #
+ $cmd .= " -d $driftnet_dir";
+ $cmd .= ">/dev/null" unless ($verbose_exec);
+
+ my $pid = fork();
+  if (!defined ($pid)) { error ("fork: $!\n"); }
+ if ($pid) {
+ # parent fork
+ push @pids_to_kill, $pid;
+ LOG ($verbose_exec, "forked for \"$cmd\"");
+ } else {
+ # child fork
+ nontrapping_system ($cmd) || error ("exec: $!");
+ }
+
+ # wait a bit, then make sure the process actually started up.
+ #
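+  # (kill() with signal 0 sends nothing; it just reports whether the
+  # process still exists.)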
+ sleep (1);
+ error ("pid $pid failed to start \"$cmd\"")
+ unless (1 == kill (0, $pid));
+}
+
+# local-directory
+sub pick_from_local_dir($) {
+ my ($timeout) = @_;
+
+ my $id = $local_magic;
+ $last_search = $id;
+
+ my $dir = $local_dir;
+ error ("\$local_dir unset?") unless ($dir);
+ $dir =~ s@/+$@@;
+
+ error ("$dir unreadable") unless (-d "$dir/.");
+
+ my $v = ($verbose_exec ? "-v" : "");
+ my $pick = `xscreensaver-getimage-file $v "$dir"`;
+ $pick =~ s/\s+$//s;
+ $pick = "$dir/$pick" unless ($pick =~ m@^/@s); # relative path
+
+ LOG ($verbose_load, "picked file $pick ($id)");
+ return ($id, $pick);
}
+sub get_local_file($) {
+ my ($file) = @_;
+
+ error ("\$local_dir unset?") unless ($local_dir);
+
+ my $id = $local_magic;
+ error ("$id: $file not in $local_dir?")
+ unless ($file =~ m@^\Q$local_dir@o);
+
+ open (my $in, '<', $file) || error ("$id: $file: $!");
+ local $/ = undef; # read entire file
+ my $body = <$in>;
+ close ($in) || error ("$id: $file: $!");
+ return ($id, $body);
+}
+
\f
############################################################################
# Picks a random image on a random page, and returns two URLs:
# the page containing the image, and the image.
# Returns () if nothing found this time.
-# Uses the url-randomizer 1 time in 5, else the image randomizer.
#
-sub pick_image {
- my ( $timeout ) = @_;
+sub pick_image(;$) {
+ my ($timeout) = @_;
$current_state = "select";
$load_method = "none";
#
############################################################################
-sub timestr {
+sub timestr() {
return strftime ("%H:%M:%S: ", localtime);
}
-sub blurb {
+sub blurb() {
return "$progname: " . timestr() . "$current_state: ";
}
-sub error {
+sub error($) {
my ($err) = @_;
print STDERR blurb() . "$err\n";
exit 1;
}
+sub stacktrace() {
+ my $i = 1;
+ print STDERR "$progname: stack trace:\n";
+ while (1) {
+ my ($package, $filename, $line, $subroutine) = caller($i++);
+ last unless defined($package);
+ $filename =~ s@^.*/@@;
+ print STDERR " $filename#$line, $subroutine\n";
+ }
+}
+
my $lastlog = "";
-sub clearlog {
+sub clearlog() {
$lastlog = "";
}
-sub showlog {
+sub showlog() {
my $head = "$progname: DEBUG: ";
foreach (split (/\n/, $lastlog)) {
print STDERR "$head$_\n";
$lastlog = "";
}
-sub LOG {
+sub LOG($$) {
my ($print, $msg) = @_;
my $blurb = timestr() . "$current_state: ";
$lastlog .= "$blurb$msg\n";
my %stats_elapsed;
my $last_state = undef;
-sub record_attempt {
+sub record_attempt($) {
my ($name) = @_;
if ($last_state) {
$suppress_audit = 0;
}
-sub record_success {
+sub record_success($$$) {
my ($name, $url, $base) = @_;
if (defined($stats_successes{$name})) {
$stats_successes{$name}++;
}
-sub record_failure {
+sub record_failure($) {
my ($name) = @_;
return if $image_succeeded;
-sub stats_of {
+sub stats_of($) {
my ($name) = @_;
my $i = $stats_successes{$name};
my $j = $stats_attempts{$name};
my $current_start_time = 0;
-sub start_timer {
+sub start_timer($) {
my ($name) = @_;
$current_start_time = time;
}
}
-sub stop_timer {
+sub stop_timer($$) {
my ($name, $success) = @_;
$stats_elapsed{$name} += time - $current_start_time;
}
my $last_report_time = 0;
-sub report_performance {
+sub report_performance() {
return unless $verbose_warnings;
my $suc = $stats_successes{$name} || 0;
my $pct = int($suc * 100 / $try);
my $secs = $stats_elapsed{$name};
- my $secs_link = int($secs / $try);
- print STDERR sprintf ("$blurb %-12s %4s (%d/%d);\t %2d secs/link\n",
+ my $secs_link = $secs / $try;
+ print STDERR sprintf ("$blurb %-14s %4s (%d/%d);" .
+ " \t %.1f secs/link\n",
"$name:", "$pct%", $suc, $try, $secs_link);
}
}
my @recent_images = ();
my @recent_sites = ();
-sub save_recent_url {
+sub save_recent_url($$) {
my ($url, $base) = @_;
return unless ($verbose_warnings);
$_ = $url;
my ($site) = m@^http://([^ \t\n\r/:]+)@;
+ return unless defined ($site);
+
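+  # Driftnet and local-directory images have no originating web site:
+  # use the magic token as the "site", and clear the recent-image list
+  # so the duplicate check below never fires for them.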
+ if ($base eq $driftnet_magic || $base eq $local_magic) {
+ $site = $base;
+ @recent_images = ();
+ }
my $done = 0;
foreach (@recent_images) {
# Does %-decoding.
#
-sub url_decode {
+sub url_decode($) {
($_) = @_;
tr/+/ /;
s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg;
# Given the raw body of a GIF document, returns the dimensions of the image.
#
-sub gif_size {
+sub gif_size($) {
my ($body) = @_;
my $type = substr($body, 0, 6);
my $s;
return () unless ($type =~ /GIF8[7,9]a/);
$s = substr ($body, 6, 10);
my ($a,$b,$c,$d) = unpack ("C"x4, $s);
+ return () unless defined ($d);
return (($b<<8|$a), ($d<<8|$c));
}
# Given the raw body of a JPEG document, returns the dimensions of the image.
#
-sub jpeg_size {
+sub jpeg_size($) {
my ($body) = @_;
my $i = 0;
my $L = length($body);
while (ord($ch) != 0xDA && $i < $L) {
# Find next marker, beginning with 0xFF.
while (ord($ch) != 0xFF) {
+ return () if (length($body) <= $i);
$ch = substr($body, $i, 1); $i++;
}
# markers can be padded with any number of 0xFF.
while (ord($ch) == 0xFF) {
+ return () if (length($body) <= $i);
$ch = substr($body, $i, 1); $i++;
}
($marker != 0xC4) &&
($marker != 0xCC)) { # it's a SOFn marker
$i += 3;
+ return () if (length($body) <= $i);
my $s = substr($body, $i, 4); $i += 4;
my ($a,$b,$c,$d) = unpack("C"x4, $s);
return (($c<<8|$d), ($a<<8|$b));
} else {
# We must skip variables, since FFs in variable names aren't
# valid JPEG markers.
+ return () if (length($body) <= $i);
my $s = substr($body, $i, 2); $i += 2;
my ($c1, $c2) = unpack ("C"x2, $s);
my $length = ($c1 << 8) | $c2;
return ();
}
-# Given the raw body of a GIF or JPEG document, returns the dimensions of
-# the image.
+# Given the raw body of a PNG document, returns the dimensions of the image.
+#
+sub png_size($) {
+ my ($body) = @_;
+ return () unless ($body =~ m/^\211PNG\r/);
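+  # A PNG is an 8-byte signature followed by the IHDR chunk: 4-byte
+  # length, the literal "IHDR", then big-endian 4-byte width and height.
+  # So skip the first 12 bytes and grab the next 12.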
+ my ($bits) = ($body =~ m/^.{12}(.{12})/s);
+ return () unless defined ($bits);
+ return () unless ($bits =~ /^IHDR/);
+ my ($ign, $w, $h) = unpack("a4N2", $bits);
+ return ($w, $h);
+}
+
+
+# Given the raw body of a GIF, JPEG, or PNG document, returns the dimensions
+# of the image.
#
-sub image_size {
+sub image_size($) {
my ($body) = @_;
my ($w, $h) = gif_size ($body);
if ($w && $h) { return ($w, $h); }
- return jpeg_size ($body);
+ ($w, $h) = jpeg_size ($body);
+ if ($w && $h) { return ($w, $h); }
+ return png_size ($body);
}
# returns the full path of the named program, or undef.
#
-sub which {
+sub which($) {
my ($prog) = @_;
foreach (split (/:/, $ENV{PATH})) {
if (-x "$_/$prog") {
# Like rand(), but chooses numbers with a bell curve distribution.
-sub bellrand {
+sub bellrand(;$) {
($_) = @_;
$_ = 1.0 unless defined($_);
$_ /= 3.0;
}
+sub exit_cleanup() {
+ x_cleanup();
+ print STDERR "$progname: exiting\n" if ($verbose_warnings);
+ if (@pids_to_kill) {
+ print STDERR blurb() . "killing: " . join(' ', @pids_to_kill) . "\n";
+ kill ('TERM', @pids_to_kill);
+ }
+}
+
+sub signal_cleanup($) {
+ my ($sig) = @_;
+ print STDERR blurb() . (defined($sig)
+ ? "caught signal $sig."
+ : "exiting.")
+ . "\n"
+ if ($verbose_exec || $verbose_warnings);
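+  # exit() runs the END block below, which calls exit_cleanup() to remove
+  # temp files and kill any forked driftnet processes.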
+ exit 1;
+}
+
+
+
##############################################################################
#
# Generating a list of urls only
#
##############################################################################
-sub url_only_output {
+sub url_only_output() {
do {
my ($base, $img) = pick_image;
if ($img) {
##############################################################################
#
-# Running as an xscreensaver module
+# Running as an xscreensaver module, or as a web page imagemap
#
##############################################################################
-my $image_ppm = ($ENV{TMPDIR} ? $ENV{TMPDIR} : "/tmp") . "/webcollage." . $$;
-my $image_tmp1 = $image_ppm . "-1";
-my $image_tmp2 = $image_ppm . "-2";
+my $image_ppm = sprintf ("%s/webcollage-%08x.ppm",
+ ($ENV{TMPDIR} ? $ENV{TMPDIR} : "/tmp"),
+ rand(0xFFFFFFFF));
+my $image_tmp1 = sprintf ("%s/webcollage-1-%08x.ppm",
+ ($ENV{TMPDIR} ? $ENV{TMPDIR} : "/tmp"),
+ rand(0xFFFFFFFF));
+my $image_tmp2 = sprintf ("%s/webcollage-2-%08x.ppm",
+ ($ENV{TMPDIR} ? $ENV{TMPDIR} : "/tmp"),
+ rand(0xFFFFFFFF));
my $filter_cmd = undef;
my $post_filter_cmd = undef;
my $background = undef;
+my @imagemap_areas = ();
+my $imagemap_html_tmp = undef;
+my $imagemap_jpg_tmp = undef;
+
+
my $img_width; # size of the image being generated.
my $img_height;
-my $delay = 0;
+my $delay = 2;
-
-sub x_cleanup {
- my ($sig) = @_;
- print STDERR blurb() . "caught signal $sig.\n" if ($verbose_exec);
+sub x_cleanup() {
unlink $image_ppm, $image_tmp1, $image_tmp2;
- exit 1;
+ unlink $imagemap_html_tmp, $imagemap_jpg_tmp
+ if (defined ($imagemap_html_tmp));
}
# Like system, but prints status about exit codes, and kills this process
# with whatever signal killed the sub-process, if any.
#
-sub nontrapping_system {
+sub nontrapping_system(@) {
$! = 0;
$_ = join(" ", @_);
}
-# Given the URL of a GIF or JPEG image, and the body of that image, writes a
-# PPM to the given output file. Returns the width/height of the image if
-# successful.
+# Given the URL of a GIF, JPEG, or PNG image, and the body of that image,
+# writes a PPM to the given output file. Returns the width/height of the
+# image if successful.
#
-sub image_to_pnm {
+sub image_to_pnm($$$) {
my ($url, $body, $output) = @_;
my ($cmd, $cmd2, $w, $h);
} elsif ((@_ = jpeg_size ($body))) {
($w, $h) = @_;
$cmd = "djpeg";
+ } elsif ((@_ = png_size ($body))) {
+ ($w, $h) = @_;
+ $cmd = "pngtopnm";
} else {
LOG (($verbose_pbm || $verbose_load),
- "not a GIF or JPG" .
- (($body =~ m@<(base|html|head|body|script|table|a href)>@i)
+ "not a GIF, JPG, or PNG" .
+ (($body =~ m@<(base|html|head|body|script|table|a href)\b@i)
? " (looks like HTML)" : "") .
": $url");
$suppress_audit = 1;
$body = undef;
};
- if (($pid = open(PIPE, "| $cmd2 > $output"))) {
+ if (($pid = open (my $pipe, "| $cmd2 > $output"))) {
$timed_out = 0;
alarm $cvt_timeout;
- print PIPE $body;
+ print $pipe $body;
$body = undef;
- close PIPE;
+ close $pipe;
LOG ($verbose_exec, "awaiting $pid");
waitpid ($pid, 0);
}
}
-sub pick_root_displayer {
+
+# Same as the "ppmmake" command: creates a solid-colored PPM.
+# Does not understand the rgb.txt color names except "black" and "white".
+#
+sub ppmmake($$$$) {
+ my ($outfile, $bgcolor, $w, $h) = @_;
+
+ my ($r, $g, $b);
+ if ($bgcolor =~ m/^\#?([\dA-F][\dA-F])([\dA-F][\dA-F])([\dA-F][\dA-F])$/i ||
+ $bgcolor =~ m/^\#?([\dA-F])([\dA-F])([\dA-F])$/i) {
+ ($r, $g, $b) = (hex($1), hex($2), hex($3));
+ } elsif ($bgcolor =~ m/^black$/i) {
+ ($r, $g, $b) = (0, 0, 0);
+ } elsif ($bgcolor =~ m/^white$/i) {
+ ($r, $g, $b) = (0xFF, 0xFF, 0xFF);
+ } else {
+ error ("unparsable color name: $bgcolor");
+ }
+
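+  # Raw PPM ("P6") is just a text header -- magic, "width height",
+  # maxval -- followed by three bytes (R, G, B) per pixel, row by row.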
+ my $pixel = pack('CCC', $r, $g, $b);
+ my $bits = "P6\n$w $h\n255\n" . ($pixel x ($w * $h));
+
+ open (my $out, '>', $outfile) || error ("$outfile: $!");
+ print $out $bits;
+ close $out;
+}
+
+
+sub pick_root_displayer() {
my @names = ();
+ if ($cocoa_p) {
+ # see "xscreensaver/hacks/webcollage-cocoa.m"
+ return "echo COCOA LOAD ";
+ }
+
foreach my $cmd (@root_displayers) {
$_ = $cmd;
my ($name) = m/^([^ ]+)/;
my $ppm_to_root_window_cmd = undef;
-sub x_or_pbm_output {
+sub x_or_pbm_output($) {
+ my ($window_id) = @_;
# Check for our helper program, to see whether we need to use PPM pipelines.
#
LOG (($verbose_pbm || $verbose_load), "no $_ program");
}
+ if ($cocoa_p && !defined ($webcollage_helper)) {
+ error ("webcollage-helper not found in Cocoa-mode!");
+ }
+
+
# make sure the various programs we execute exist, right up front.
#
- my @progs = ("ppmmake"); # always need this one
+ my @progs = ();
if (!defined($webcollage_helper)) {
# Only need these others if we don't have the helper.
- @progs = (@progs, "giftopnm", "djpeg", "pnmpaste", "pnmscale", "pnmcut");
+ @progs = (@progs,
+ "giftopnm", "pngtopnm", "djpeg",
+ "pnmpaste", "pnmscale", "pnmcut");
}
foreach (@progs) {
# find a root-window displayer program.
#
- $ppm_to_root_window_cmd = pick_root_displayer();
-
+ if (!$no_output_p) {
+ $ppm_to_root_window_cmd = pick_root_displayer();
+ }
- $SIG{HUP} = \&x_cleanup;
- $SIG{INT} = \&x_cleanup;
- $SIG{QUIT} = \&x_cleanup;
- $SIG{ABRT} = \&x_cleanup;
- $SIG{KILL} = \&x_cleanup;
- $SIG{TERM} = \&x_cleanup;
+ if (defined ($window_id)) {
+ error ("-window-id only works if xscreensaver-getimage is installed")
+ unless ($ppm_to_root_window_cmd =~ m/^xscreensaver-getimage\b/);
- # Need this so that if giftopnm dies, we don't die.
- $SIG{PIPE} = 'IGNORE';
+ error ("unparsable window id: $window_id")
+ unless ($window_id =~ m/^\d+$|^0x[\da-f]+$/i);
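+    # Swap the "-root" argument for the literal window ID; e.g.
+    # "xscreensaver-getimage -root" would become something like
+    # "xscreensaver-getimage 0x1c00021" (illustrative ID).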
+ $ppm_to_root_window_cmd =~ s/--?root\b/$window_id/ ||
+ error ("unable to munge displayer: $ppm_to_root_window_cmd");
+ }
if (!$img_width || !$img_height) {
- $_ = "xdpyinfo";
- which ($_) || error "$_ not found on \$PATH.";
- $_ = `$_`;
- ($img_width, $img_height) = m/dimensions: *(\d+)x(\d+) /;
- if (!defined($img_height)) {
- error "xdpyinfo failed.";
+
+ if (!defined ($window_id) &&
+ defined ($ENV{XSCREENSAVER_WINDOW})) {
+ $window_id = $ENV{XSCREENSAVER_WINDOW};
+ }
+
+ if (!defined ($window_id)) {
+ $_ = "xdpyinfo";
+ which ($_) || error "$_ not found on \$PATH.";
+ $_ = `$_`;
+ ($img_width, $img_height) = m/dimensions: *(\d+)x(\d+) /;
+ if (!defined($img_height)) {
+ error "xdpyinfo failed.";
+ }
+ } else { # we have a window id
+ $_ = "xwininfo";
+ which ($_) || error "$_ not found on \$PATH.";
+ $_ .= " -id $window_id";
+ $_ = `$_`;
+ ($img_width, $img_height) = m/^\s*Width:\s*(\d+)\n\s*Height:\s*(\d+)\n/m;
+
+ if (!defined($img_height)) {
+ error "xwininfo failed.";
+ }
}
}
   # Create the solid-colored base image.
#
- $_ = "ppmmake '$bgcolor' $img_width $img_height";
- LOG ($verbose_pbm, "creating base image: $_");
- nontrapping_system "$_ > $image_ppm";
+ LOG ($verbose_pbm, "creating base image: ${img_width}x${img_height}");
+ $_ = ppmmake ($image_ppm, $bgcolor, $img_width, $img_height);
# Paste the default background image in the middle of it.
#
my ($iw, $ih);
my $body = "";
- local *IMG;
- open(IMG, "<$bgimage") || error "couldn't open $bgimage: $!";
- my $cmd;
- while (<IMG>) { $body .= $_; }
- close (IMG);
+ open (my $imgf, '<', $bgimage) || error "couldn't open $bgimage: $!";
+ local $/ = undef; # read entire file
+ $body = <$imgf>;
+ close ($imgf);
+ my $cmd;
if ((@_ = gif_size ($body))) {
($iw, $ih) = @_;
$cmd = "giftopnm |";
($iw, $ih) = @_;
$cmd = "djpeg |";
+ } elsif ((@_ = png_size ($body))) {
+ ($iw, $ih) = @_;
+ $cmd = "pngtopnm |";
+
} elsif ($body =~ m/^P\d\n(\d+) (\d+)\n/) {
$iw = $1;
$ih = $2;
$cmd = "";
} else {
- error "$bgimage is not a GIF, JPEG, or PPM.";
+ error "$bgimage is not a GIF, JPEG, PNG, or PPM.";
}
my $x = int (($img_width - $iw) / 2);
"pasting $bgimage (${iw}x$ih) into base image at $x,$y");
$cmd .= "pnmpaste - $x $y $image_ppm > $image_tmp1";
- open (IMG, "| $cmd") || error "running $cmd: $!";
- print IMG $body;
+ open ($imgf, "| $cmd") || error "running $cmd: $!";
+ print $imgf $body;
$body = undef;
- close (IMG);
+ close ($imgf);
LOG ($verbose_exec, "subproc exited normally.");
rename ($image_tmp1, $image_ppm) ||
error "renaming $image_tmp1 to $image_ppm: $!";
}
}
-sub paste_image {
+sub paste_image($$$$) {
my ($base, $img, $body, $source) = @_;
$current_state = "paste";
my ($iw, $ih);
- if (defined ($webcollage_helper)) {
+ # If we are using the webcollage-helper, then we do not need to convert this
+ # image to a PPM. But, if we're using a filter command, we still must, since
+ # that's what the filters expect (webcollage-helper can read PPMs, so that's
+ # fine.)
+ #
+ if (defined ($webcollage_helper) &&
+ !defined ($filter_cmd)) {
($iw, $ih) = image_size ($body);
if (!$iw || !$ih) {
LOG (($verbose_pbm || $verbose_load),
- "not a GIF or JPG" .
+ "not a GIF, JPG, or PNG" .
(($body =~ m@<(base|html|head|body|script|table|a href)>@i)
? " (looks like HTML)" : "") .
": $img");
return 0;
}
- local *OUT;
- open (OUT, ">$image_tmp1") || error ("writing $image_tmp1: $!");
- print OUT $body || error ("writing $image_tmp1: $!");
- close OUT || error ("writing $image_tmp1: $!");
+ open (my $out, '>', $image_tmp1) || error ("writing $image_tmp1: $!");
+ (print $out $body) || error ("writing $image_tmp1: $!");
+ close ($out) || error ("writing $image_tmp1: $!");
} else {
($iw, $ih) = image_to_pnm ($img, $body, $image_tmp1);
rename ($image_tmp2, $image_tmp1);
# re-get the width/height in case the filter resized it.
- local *IMG;
- open(IMG, "<$image_tmp1") || return 0;
- $_ = <IMG>;
- $_ = <IMG>;
+ open (my $imgf, '<', $image_tmp1) || return 0;
+ $_ = <$imgf>;
+ $_ = <$imgf>;
($iw, $ih) = m/^(\d+) (\d+)$/;
- close (IMG);
+ close ($imgf);
return 0 unless ($iw && $ih);
}
- my $target_w = $img_width;
+ my $target_w = $img_width; # max rectangle into which the image must fit
my $target_h = $img_height;
my $cmd = "";
# Usually scale the image to fit on the screen -- but sometimes scale it
- # to fit on half or a quarter of the screen. Note that we don't merely
- # scale it to fit, we instead cut it in half until it fits -- that should
- # give a wider distribution of sizes.
+ # to fit on half or a quarter of the screen. (We do this by reducing the
+ # size of the target rectangle.) Note that the image is not merely scaled
+ # to fit; we instead cut the image in half repeatedly until it fits in the
+ # target rectangle -- that gives a wider distribution of sizes.
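+  # (For example, with a 1280x1024 target a 3000x2000 image would be
+  # halved twice, down to 750x500 -- illustrative numbers only.)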
#
- if (rand() < 0.3) { $target_w /= 2; $target_h /= 2; $scale /= 2; }
- if (rand() < 0.3) { $target_w /= 2; $target_h /= 2; $scale /= 2; }
+ if (rand() < 0.3) { $target_w /= 2; $target_h /= 2; } # reduce target rect
+ if (rand() < 0.3) { $target_w /= 2; $target_h /= 2; }
if ($iw > $target_w || $ih > $target_h) {
while ($iw > $target_w ||
$ih > $target_h) {
$iw = int($iw / 2);
$ih = int($ih / 2);
+ $scale /= 2;
}
if ($iw <= 10 || $ih <= 10) {
LOG ($verbose_pbm, "scaling to ${iw}x$ih would have been bogus.");
return 0;
}
- LOG ($verbose_pbm, "scaling to ${iw}x$ih");
+ LOG ($verbose_pbm, "scaling to ${iw}x$ih ($scale)");
$cmd .= " | pnmscale -xsize $iw -ysize $ih";
}
# If any cropping needs to happen, add pnmcut.
#
if ($crop_x != 0 || $crop_y != 0 ||
- $crop_w != $iw || $crop_h != $ih) {
+ $crop_w != $iw || $crop_h != $ih) {
$iw = $crop_w;
$ih = $crop_h;
$cmd .= " | pnmcut $crop_x $crop_y $iw $ih";
# cumulative.
#
if ($post_filter_cmd) {
+
+ my $cmd;
+
$target = $image_tmp1;
- $rc = nontrapping_system "($post_filter_cmd) < $image_ppm > $target";
+ if (!defined ($webcollage_helper)) {
+ $cmd = "($post_filter_cmd) < $image_ppm > $target";
+ } else {
+ # Blah, my scripts need the JPEG data, but some other folks need
+ # the PPM data -- what to do? Ignore the problem, that's what!
+# $cmd = "djpeg < $image_ppm | ($post_filter_cmd) > $target";
+ $cmd = "($post_filter_cmd) < $image_ppm > $target";
+ }
+
+ $rc = nontrapping_system ($cmd);
if ($rc != 0) {
LOG ($verbose_pbm, "filter failed: \"$post_filter_cmd\"\n");
return;
# the next network retrieval, which is probably a better thing
# to do anyway.
#
- $cmd .= " &";
+ $cmd .= " &" unless ($cocoa_p);
$rc = nontrapping_system ($cmd);
$source .= "-" . stats_of($source);
print STDOUT "image: ${iw}x${ih} @ $x,$y $base $source\n"
if ($verbose_imgmap);
+ if ($imagemap_base) {
+ update_imagemap ($base, $x, $y, $iw, $ih,
+ $image_ppm, $img_width, $img_height);
+ }
clearlog();
}
-sub main {
+sub update_imagemap($$$$$$$$) {
+ my ($url, $x, $y, $w, $h, $image_ppm, $image_width, $image_height) = @_;
+
+ $current_state = "imagemap";
+
+ my $max_areas = 200;
+
+ $url = html_quote ($url);
+ my $x2 = $x + $w;
+ my $y2 = $y + $h;
+ my $area = "<AREA SHAPE=RECT COORDS=\"$x,$y,$x2,$y2\" HREF=\"$url\">";
+ unshift @imagemap_areas, $area; # put one on the front
+ if ($#imagemap_areas >= $max_areas) {
+ pop @imagemap_areas; # take one off the back.
+ }
+
+ LOG ($verbose_pbm, "area: $x,$y,$x2,$y2 (${w}x$h)");
+
+ my $map_name = $imagemap_base;
+ $map_name =~ s@^.*/@@;
+ $map_name = 'collage' if ($map_name eq '');
+
+ my $imagemap_html = $imagemap_base . ".html";
+ my $imagemap_jpg = $imagemap_base . ".jpg";
+
+ if (!defined ($imagemap_html_tmp)) {
+ $imagemap_html_tmp = $imagemap_html . sprintf (".%08x", rand(0xffffffff));
+ $imagemap_jpg_tmp = $imagemap_jpg . sprintf (".%08x", rand(0xffffffff));
+ }
+
+ # Read the imagemap html file (if any) to get a template.
+ #
+ my $template_html = '';
+ {
+ if (open (my $in, '<', $imagemap_html)) {
+ local $/ = undef; # read entire file
+ $template_html = <$in>;
+ close $in;
+ LOG ($verbose_pbm, "read template $imagemap_html");
+ }
+
+ if ($template_html =~ m/^\s*$/s) {
+ $template_html = ("<MAP NAME=\"$map_name\"></MAP>\n" .
+ "<IMG SRC=\"$imagemap_base.jpg\"" .
+ " USEMAP=\"$map_name\">\n");
+ LOG ($verbose_pbm, "created dummy template");
+ }
+ }
+
+ # Write the jpg to a tmp file
+ #
+ {
+ my $cmd;
+ if (defined ($webcollage_helper)) {
+ $cmd = "cp -p $image_ppm $imagemap_jpg_tmp";
+ } else {
+ $cmd = "cjpeg < $image_ppm > $imagemap_jpg_tmp";
+ }
+ my $rc = nontrapping_system ($cmd);
+ if ($rc != 0) {
+ error ("imagemap jpeg failed: \"$cmd\"\n");
+ }
+ }
+
+ # Write the html to a tmp file
+ #
+ {
+ my $body = $template_html;
+ my $areas = join ("\n\t", @imagemap_areas);
+ my $map = ("<MAP NAME=\"$map_name\">\n\t$areas\n</MAP>");
+ my $img = ("<IMG SRC=\"$imagemap_base.jpg\" " .
+ "BORDER=0 " .
+ "WIDTH=$image_width HEIGHT=$image_height " .
+ "USEMAP=\"#$map_name\">");
+ $body =~ s@(<MAP\s+NAME=\"[^\"]*\"\s*>).*?(</MAP>)@$map@is;
+ $body =~ s@<IMG\b[^<>]*\bUSEMAP\b[^<>]*>@$img@is;
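+    # (These substitutions assume the template contains one
+    # <MAP NAME="..."> ... </MAP> block and one <IMG ... USEMAP ...> tag;
+    # the dummy template above provides both when no template file existed.)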
+
+ # if there are magic webcollage spans in the html, update those too.
+ #
+ {
+ my @st = stat ($imagemap_jpg_tmp);
+ my $date = strftime("%d-%b-%Y %l:%M:%S %p %Z", localtime($st[9]));
+ my $size = int(($st[7] / 1024) + 0.5) . "K";
+ $body =~ s@(<SPAN\s+CLASS=\"webcollage_date\">).*?(</SPAN>)@$1$date$2@si;
+ $body =~ s@(<SPAN\s+CLASS=\"webcollage_size\">).*?(</SPAN>)@$1$size$2@si;
+ }
+
+ open (my $out, '>', $imagemap_html_tmp) || error ("$imagemap_html_tmp: $!");
+ (print $out $body) || error ("$imagemap_html_tmp: $!");
+ close ($out) || error ("$imagemap_html_tmp: $!");
+ LOG ($verbose_pbm, "wrote $imagemap_html_tmp");
+ }
+
+ # Rename the two tmp files to the real files
+ #
+  rename ($imagemap_html_tmp, $imagemap_html) ||
+    error "renaming $imagemap_html_tmp to $imagemap_html: $!";
+  LOG ($verbose_pbm, "wrote $imagemap_html");
+  rename ($imagemap_jpg_tmp, $imagemap_jpg) ||
+    error "renaming $imagemap_jpg_tmp to $imagemap_jpg: $!";
+ LOG ($verbose_pbm, "wrote $imagemap_jpg");
+}
+
+
+# Figure out what the proxy server should be, either from environment
+# variables or by parsing the output of the (MacOS) program "scutil",
+# which tells us what the system-wide proxy settings are.
+#
+sub set_proxy() {
+
+ if (! $http_proxy) {
+ # historical suckage: the environment variable name is lower case.
+ $http_proxy = $ENV{http_proxy} || $ENV{HTTP_PROXY};
+ }
+
+ if (defined ($http_proxy)) {
+ if ($http_proxy && $http_proxy =~ m@^http://([^/]*)/?$@ ) {
+ # historical suckage: allow "http://host:port" as well as "host:port".
+ $http_proxy = $1;
+ }
+
+ } else {
+ my $proxy_data = `scutil --proxy 2>/dev/null`;
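+    # Typical "scutil --proxy" output looks something like:
+    #   HTTPEnable : 1
+    #   HTTPProxy : proxy.example.com
+    #   HTTPPort : 8080
+    # (illustrative values only).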
+ my ($server) = ($proxy_data =~ m/\bHTTPProxy\s*:\s*([^\s]+)/s);
+ my ($port) = ($proxy_data =~ m/\bHTTPPort\s*:\s*([^\s]+)/s);
+ # Note: this ignores the "ExceptionsList".
+ if ($server) {
+ $http_proxy = $server;
+ $http_proxy .= ":$port" if $port;
+ }
+ }
+
+ if ($http_proxy) {
+ LOG ($verbose_net, "proxy server: $http_proxy");
+ }
+}
+
+
+sub init_signals() {
+
+ $SIG{HUP} = \&signal_cleanup;
+ $SIG{INT} = \&signal_cleanup;
+ $SIG{QUIT} = \&signal_cleanup;
+ $SIG{ABRT} = \&signal_cleanup;
+ $SIG{KILL} = \&signal_cleanup;
+ $SIG{TERM} = \&signal_cleanup;
+
+ # Need this so that if giftopnm dies, we don't die.
+ $SIG{PIPE} = 'IGNORE';
+}
+
+END { exit_cleanup(); }
+
+
+sub main() {
$| = 1;
srand(time ^ $$);
my $verbose = 0;
my $dict;
+ my $driftnet_cmd = 0;
$current_state = "init";
$load_method = "none";
my $root_p = 0;
-
- # historical suckage: the environment variable name is lower case.
- $http_proxy = $ENV{http_proxy} || $ENV{HTTP_PROXY};
+ my $window_id = undef;
while ($_ = $ARGV[0]) {
shift @ARGV;
$ENV{DISPLAY} = shift @ARGV;
} elsif ($_ eq "-root") {
$root_p = 1;
+ } elsif ($_ eq "-window-id" || $_ eq "--window-id") {
+ $window_id = shift @ARGV;
+ $root_p = 1;
} elsif ($_ eq "-no-output") {
$no_output_p = 1;
} elsif ($_ eq "-urls-only") {
$urls_only_p = 1;
$no_output_p = 1;
+ } elsif ($_ eq "-cocoa") {
+ $cocoa_p = 1;
+ } elsif ($_ eq "-imagemap") {
+ $imagemap_base = shift @ARGV;
+ $no_output_p = 1;
} elsif ($_ eq "-verbose") {
$verbose++;
} elsif (m/^-v+$/) {
$http_proxy = shift @ARGV;
} elsif ($_ eq "-dictionary" || $_ eq "-dict") {
$dict = shift @ARGV;
+ } elsif ($_ eq "-opacity") {
+ $opacity = shift @ARGV;
+ error ("opacity must be between 0.0 and 1.0")
+ if ($opacity <= 0 || $opacity > 1);
+ } elsif ($_ eq "-driftnet" || $_ eq "--driftnet") {
+ @search_methods = ( 100, "driftnet", \&pick_from_driftnet );
+      if (defined ($ARGV[0]) && ! ($ARGV[0] =~ m/^-/)) {
+ $driftnet_cmd = shift @ARGV;
+ } else {
+ $driftnet_cmd = $default_driftnet_cmd;
+ }
+ } elsif ($_ eq "-directory" || $_ eq "--directory") {
+ @search_methods = ( 100, "local", \&pick_from_local_dir );
+      if (defined ($ARGV[0]) && ! ($ARGV[0] =~ m/^-/)) {
+ $local_dir = shift @ARGV;
+ } else {
+ error ("local directory path must be set")
+ }
+ } elsif ($_ eq "-fps") {
+ # -fps only works on MacOS, via "webcollage-cocoa.m".
+ # Ignore it if passed to this script in an X11 context.
} elsif ($_ eq "-debug" || $_ eq "--debug") {
my $which = shift @ARGV;
my @rest = @search_methods;
LOG (1, "DEBUG: using only \"$which\"");
} else {
- print STDERR "$copyright\nusage: $progname [-root]" .
- " [-display dpy] [-root] [-verbose] [-timeout secs]\n" .
- "\t\t [-delay secs] [-filter cmd] [-filter2 cmd]\n" .
- "\t\t [-dictionary dictionary-file]\n" .
- "\t\t [-http-proxy host[:port]]\n";
+ print STDERR "$copyright\nusage: $progname " .
+ "[-root] [-display dpy] [-verbose] [-debug which]\n" .
+ "\t\t [-timeout secs] [-delay secs] [-size WxH]\n" .
+ "\t\t [-no-output] [-urls-only] [-imagemap filename]\n" .
+ "\t\t [-background color] [-opacity f]\n" .
+ "\t\t [-filter cmd] [-filter2 cmd]\n" .
+ "\t\t [-dictionary dictionary-file] [-http-proxy host[:port]]\n" .
+ "\t\t [-driftnet [driftnet-program-and-args]]\n" .
+ "\t\t [-directory local-image-directory]\n" .
+ "\n";
exit 1;
}
}
- if ($http_proxy && $http_proxy eq "") {
- $http_proxy = undef;
- }
- if ($http_proxy && $http_proxy =~ m@^http://([^/]*)/?$@ ) {
- # historical suckage: allow "http://host:port" as well as "host:port".
- $http_proxy = $1;
- }
-
- if (!$root_p && !$no_output_p) {
+ if (!$root_p && !$no_output_p && !$cocoa_p) {
print STDERR $copyright;
error "the -root argument is mandatory (for now.)";
}
- if (!$no_output_p && !$ENV{DISPLAY}) {
+ if (!$no_output_p && !$cocoa_p && !$ENV{DISPLAY}) {
error "\$DISPLAY is not set.";
}
pick_dictionary();
}
+ if ($imagemap_base && !($img_width && $img_height)) {
+ error ("-size WxH is required with -imagemap");
+ }
+
+ if (defined ($local_dir)) {
+ $_ = "xscreensaver-getimage-file";
+ which ($_) || error "$_ not found on \$PATH.";
+ }
+
+ init_signals();
+ set_proxy();
+
+ spawn_driftnet ($driftnet_cmd) if ($driftnet_cmd);
+
if ($urls_only_p) {
- url_only_output;
+ url_only_output ();
} else {
- x_or_pbm_output;
+ x_or_pbm_output ($window_id);
}
}
-main;
+main();
exit (0);