#!/usr/bin/perl -w
#
-# webcollage, Copyright (c) 1999-2005 by Jamie Zawinski <jwz@jwz.org>
+# webcollage, Copyright (c) 1999-2008 by Jamie Zawinski <jwz@jwz.org>
# This program decorates the screen with random images from the web.
# One satisfied customer described it as "a nonstop pop culture brainbath."
#
my $progname = $0; $progname =~ s@.*/@@g;
-my $version = q{ $Revision: 1.125 $ }; $version =~ s/^[^0-9]+([0-9.]+).*$/$1/;
-my $copyright = "WebCollage $version, Copyright (c) 1999-2005" .
+my $version = q{ $Revision: 1.149 $ }; $version =~ s/^[^0-9]+([0-9.]+).*$/$1/;
+my $copyright = "WebCollage $version, Copyright (c) 1999-2008" .
" Jamie Zawinski <jwz\@jwz.org>\n" .
" http://www.jwz.org/webcollage/\n";
-my @search_methods = ( 58, "altavista", \&pick_from_alta_vista_random_link,
- 11, "livejournal", \&pick_from_livejournal_images,
- 7, "yahoorand", \&pick_from_yahoo_random_link,
- 10, "googlephotos", \&pick_from_google_image_photos,
- 6, "googleimgs", \&pick_from_google_images,
- 3, "googlenums", \&pick_from_google_image_numbers,
- 5, "flickr", \&pick_from_flickr,
+my @search_methods = ( 20, "googlephotos", \&pick_from_google_image_photos,
+ 10, "googleimgs", \&pick_from_google_images,
+ 10, "googlenums", \&pick_from_google_image_numbers,
+
+ 19, "altavista", \&pick_from_alta_vista_random_link,
+ 12, "flickr_recent", \&pick_from_flickr_recent,
+ 10, "flickr_random", \&pick_from_flickr_random,
+ 10, "livejournal", \&pick_from_livejournal_images,
+ 5, "twitter", \&pick_from_twitter_images,
+ 4, "yahoorand", \&pick_from_yahoo_random_link,
+
+
+                       # This is a cute way to search for certain webcams.
+ # Not included in default methods, since these images
+ # aren't terribly interesting by themselves.
+ # See also "SurveillanceSaver".
+ #
+ 0, "securitycam", \&pick_from_security_camera,
# In Apr 2002, Google asked me to stop searching them.
# I asked them to add a "random link" url. They said
# (I don't see how they did it, though!)
"alwayshotels.com" => 1, # Poisoned Lycos pretty heavily.
"nextag.com" => 1, # Poisoned Alta Vista real good.
+ "ghettodriveby.com" => 1, # Poisoned Google Images.
+ "crosswordsolver.org" => 1, # Poisoned Google Images.
+ "xona.com" => 1, # Poisoned Google Images.
+ "freepatentsonline.com" => 1, # Poisoned Google Images.
+ "herbdatanz.com" => 1, # Poisoned Google Images.
);
"pics.livejournal.com" => 1,
"tinypic.com" => 1,
"flickr.com" => 1,
+ "pbase.com" => 1,
+ "blogger.com" => 1,
+ "multiply.com" => 1,
+ "wikimedia.org" => 1,
+ "twitpic.com" => 1,
+ "amazonaws.com" => 1, # used by twitpic.com
"yimg.com" => 1, # This is where dailynews.yahoo.com stores
"eimg.com" => 1, # its images, so pick_from_yahoo_news_text()
"images.quizdiva.net" => 1,
"driftnet" => 1, # builtin...
+ "local-directory" => 1, # builtin...
+);
+
+
+# For decoding HTML-encoded character entities to URLs.
+#
+my %entity_table = (
+ "apos" => '\'',
+ "quot" => '"', "amp" => '&', "lt" => '<', "gt" => '>',
+ "nbsp" => ' ', "iexcl" => '¡', "cent" => '¢', "pound" => '£',
+ "curren" => '¤', "yen" => '¥', "brvbar" => '¦', "sect" => '§',
+ "uml" => '¨', "copy" => '©', "ordf" => 'ª', "laquo" => '«',
+ "not" => '¬', "shy" => '', "reg" => '®', "macr" => '¯',
+ "deg" => '°', "plusmn" => '±', "sup2" => '²', "sup3" => '³',
+ "acute" => '´', "micro" => 'µ', "para" => '¶', "middot" => '·',
+ "cedil" => '¸', "sup1" => '¹', "ordm" => 'º', "raquo" => '»',
+ "frac14" => '¼', "frac12" => '½', "frac34" => '¾', "iquest" => '¿',
+ "Agrave" => 'À', "Aacute" => 'Á', "Acirc" => 'Â', "Atilde" => 'Ã',
+ "Auml" => 'Ä', "Aring" => 'Å', "AElig" => 'Æ', "Ccedil" => 'Ç',
+ "Egrave" => 'È', "Eacute" => 'É', "Ecirc" => 'Ê', "Euml" => 'Ë',
+ "Igrave" => 'Ì', "Iacute" => 'Í', "Icirc" => 'Î', "Iuml" => 'Ï',
+ "ETH" => 'Ð', "Ntilde" => 'Ñ', "Ograve" => 'Ò', "Oacute" => 'Ó',
+ "Ocirc" => 'Ô', "Otilde" => 'Õ', "Ouml" => 'Ö', "times" => '×',
+ "Oslash" => 'Ø', "Ugrave" => 'Ù', "Uacute" => 'Ú', "Ucirc" => 'Û',
+ "Uuml" => 'Ü', "Yacute" => 'Ý', "THORN" => 'Þ', "szlig" => 'ß',
+ "agrave" => 'à', "aacute" => 'á', "acirc" => 'â', "atilde" => 'ã',
+ "auml" => 'ä', "aring" => 'å', "aelig" => 'æ', "ccedil" => 'ç',
+ "egrave" => 'è', "eacute" => 'é', "ecirc" => 'ê', "euml" => 'ë',
+ "igrave" => 'ì', "iacute" => 'í', "icirc" => 'î', "iuml" => 'ï',
+ "eth" => 'ð', "ntilde" => 'ñ', "ograve" => 'ò', "oacute" => 'ó',
+ "ocirc" => 'ô', "otilde" => 'õ', "ouml" => 'ö', "divide" => '÷',
+ "oslash" => 'ø', "ugrave" => 'ù', "uacute" => 'ú', "ucirc" => 'û',
+ "uuml" => 'ü', "yacute" => 'ý', "thorn" => 'þ', "yuml" => 'ÿ',
+
+ # HTML 4 entities that do not have 1:1 Latin1 mappings.
+ "bull" => "*", "hellip"=> "...", "prime" => "'", "Prime" => "\"",
+ "frasl" => "/", "trade" => "[tm]", "larr" => "<-", "rarr" => "->",
+ "harr" => "<->", "lArr" => "<=", "rArr" => "=>", "hArr" => "<=>",
+ "empty" => "Ø", "minus" => "-", "lowast"=> "*", "sim" => "~",
+ "cong" => "=~", "asymp" => "~", "ne" => "!=", "equiv" => "==",
+ "le" => "<=", "ge" => ">=", "lang" => "<", "rang" => ">",
+ "loz" => "<>", "OElig" => "OE", "oelig" => "oe", "Yuml" => "Y",
+ "circ" => "^", "tilde" => "~", "ensp" => " ", "emsp" => " ",
+ "thinsp"=> " ", "ndash" => "-", "mdash" => "--", "lsquo" => "`",
+ "rsquo" => "'", "sbquo" => "'", "ldquo" => "\"", "rdquo" => "\"",
+ "bdquo" => "\"", "lsaquo"=> "<", "rsaquo"=> ">",
);
my $no_output_p = 0;
my $urls_only_p = 0;
+my $cocoa_p = 0;
my $imagemap_base = undef;
my @pids_to_kill = (); # forked pids we should kill when we exit, if any.
my $driftnet_dir = undef;
my $default_driftnet_cmd = "driftnet -a -m 100";
+my $local_magic = 'local-directory';
+my $local_dir = undef;
+
my $wordlist;
my %rejected_urls;
# returns three values: the HTTP response line; the document headers;
# and the document body.
#
-sub get_document_1 {
- my ( $url, $referer, $timeout ) = @_;
+sub get_document_1($$$) {
+ my ($url, $referer, $timeout) = @_;
if (!defined($timeout)) { $timeout = $http_timeout; }
if ($timeout > $http_timeout) { $timeout = $http_timeout; }
if ($url =~ m@^http://www\.altavista\.com/@ ||
$url =~ m@^http://random\.yahoo\.com/@ ||
- $url =~ m@^http://images\.google\.com/@) {
+ $url =~ m@^http://images\.google\.com/@ ||
+ $url =~ m@^http://www\.google\.com/@) {
# block this, you turkeys.
- $user_agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.5)" .
- " Gecko/20041111 Firefox/1.0";
+ $user_agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.7)" .
+ " Gecko/20070914 Firefox/2.0.0.7";
+
+ # 28-Jun-2007: Google Images now emits the entire page in JS if
+ # you claim to be Gecko. They also still block "webcollage".
+ # They serve non-JS for unrecognised agents, so let's try this...
+ $user_agent = "NoJavascriptPlease/1.0"
+ if ($url =~ m@^http://[a-z]+\.google\.com/@);
}
my $hdrs = "GET " . ($http_proxy ? $url : "/$path") . " HTTP/1.0\r\n" .
# returns two values: the document headers; and the document body.
# if the given URL did a redirect, returns the redirected-to document.
#
-sub get_document {
- my ( $url, $referer, $timeout ) = @_;
+sub get_document($$;$) {
+ my ($url, $referer, $timeout) = @_;
my $start = time;
if (defined($referer) && $referer eq $driftnet_magic) {
return get_driftnet_file ($url);
}
+ if (defined($referer) && $referer eq $local_magic) {
+ return get_local_file ($url);
+ }
+
my $orig_url = $url;
my $loop_count = 0;
my $max_loop_count = 4;
# in again, but you have to present the old cookie to get the new cookie.
# So, by doing this, the built-in cypherpunks cookie will never go "stale".
#
-sub set_cookie {
+sub set_cookie($$) {
my ($host, $cookie) = @_;
my $oc = $cookies{$host};
return unless $oc;
# given a URL and the body text at that URL, selects and returns a random
# image from it. returns () if no suitable images found.
#
-sub pick_image_from_body {
- my ( $url, $body ) = @_;
+sub pick_image_from_body($$) {
+ my ($url, $body) = @_;
my $base = $url;
$_ = $url;
return $url;
}
+# Given a URL and the RSS feed from that URL, pick a random image from
+# the feed. This is a lot simpler than extracting images out of a page:
+# we already know we have reasonable images, so we just pick one.
+# Returns: the real URL of the page (preferably not the RSS version),
+# and the image.
+
+sub pick_image_from_rss($$) {
+ my ( $url, $body ) = @_;
+ my @suitable = ($body =~ m/<enclosure url="(.*?)"/g);
+
+ my ($base) = ($body =~ m@<link>([^<>]+)</link>@i);
+ $base = $url unless $base;
+
+ # pick a random element of the table
+ if (@suitable) {
+ my $i = int(rand(scalar @suitable));
+ my $url = $suitable[$i];
+ LOG ($verbose_load, "picked image " .($i+1) . "/" .
+ ($#suitable+1) . ": $url");
+ return ($base, $url);
+ }
+ return;
+}
\f
############################################################################
############################################################################
-sub pick_dictionary {
+sub pick_dictionary() {
my @dicts = ("/usr/dict/words",
"/usr/share/dict/words",
- "/usr/share/lib/dict/words");
+ "/usr/share/lib/dict/words",
+ "/usr/share/dict/cracklib-small",
+ "/usr/share/dict/cracklib-words"
+ );
foreach my $f (@dicts) {
if (-f $f) {
$wordlist = $f;
# returns a random word from the dictionary
#
-sub random_word {
+sub random_word() {
local *IN;
if (! open (IN, "<$wordlist")) {
}
-sub random_words {
+sub random_words($) {
my ($or_p) = @_;
my $sep = ($or_p ? "%20OR%20" : "%20");
return (random_word . $sep .
}
-sub url_quote {
+sub url_quote($) {
my ($s) = @_;
$s =~ s|([^-a-zA-Z0-9.\@/_\r\n])|sprintf("%%%02X", ord($1))|ge;
return $s;
}
-sub url_unquote {
+sub url_unquote($) {
my ($s) = @_;
$s =~ s/[+]/ /g;
$s =~ s/%([a-z0-9]{2})/chr(hex($1))/ige;
return $s;
}
-sub html_quote {
+sub html_quote($) {
my ($s) = @_;
$s =~ s/&/&/gi;
$s =~ s/</</gi;
return $s;
}
-sub html_unquote {
+sub html_unquote($) {
my ($s) = @_;
- $s =~ s/</</gi; # far from exhaustive...
- $s =~ s/>/</gi;
- $s =~ s/"/\"/gi;
- $s =~ s/&/&/gi;
+ $s =~ s/(&([a-z]+);)/{ $entity_table{$2} || $1; }/gexi; # e.g., '
+ $s =~ s/(&\#(\d+);)/{ chr($2) }/gexi; # e.g., '
return $s;
}
# Note that this list contains all kinds of internal search engine
# junk URLs too -- caller must prune them.
#
-sub pick_from_search_engine {
+sub pick_from_search_engine($$$) {
my ( $timeout, $search_url, $words ) = @_;
$_ = $words;
}
-sub depoison {
+sub depoison(@) {
my (@urls) = @_;
my @urls2 = ();
foreach (@urls) {
# random image from it.
# returns the url of the page loaded; the url of the image chosen.
#
-sub pick_image_from_pages {
+sub pick_image_from_pages($$$$@) {
my ($base, $total_hit_count, $unfiltered_link_count, $timeout, @pages) = @_;
$total_hit_count = "?" unless defined($total_hit_count);
# returns two URLs: the page containing the image, and the image.
# Returns () if nothing found this time.
#
-sub pick_from_yahoo_random_link {
- my ( $timeout ) = @_;
+sub pick_from_yahoo_random_link($) {
+ my ($timeout) = @_;
print STDERR "\n\n" if ($verbose_load);
LOG ($verbose_load, "URL: $yahoo_random_link");
############################################################################
#
# Pick images from random pages returned by the Alta Vista Random Link
+# Note: this seems to have gotten a *lot* less random lately (2007).
#
############################################################################
# returns two URLs: the page containing the image, and the image.
# Returns () if nothing found this time.
#
-sub pick_from_alta_vista_random_link {
- my ( $timeout ) = @_;
+sub pick_from_alta_vista_random_link($) {
+ my ($timeout) = @_;
print STDERR "\n\n" if ($verbose_load);
LOG ($verbose_load, "URL: $alta_vista_random_link");
"&q=";
# avimages
-sub pick_from_alta_vista_images {
- my ( $timeout ) = @_;
+sub pick_from_alta_vista_images($) {
+ my ($timeout) = @_;
my $words = random_word();
my $page = (int(rand(9)) + 1);
}
+\f
+############################################################################
+#
+# Pick images from Aptix security cameras
+# Cribbed liberally from google image search code.
+# By Jason Sullivan <jasonsul@us.ibm.com>
+#
+############################################################################
+
+my $aptix_images_url = ("http://www.google.com/search" .
+ "?q=inurl:%22jpg/image.jpg%3Fr%3D%22");
+
+# securitycam
+sub pick_from_security_camera($) {
+ my ($timeout) = @_;
+
+ my $page = (int(rand(9)) + 1);
+ my $num = 20; # 20 images per page
+ my $search_url = $aptix_images_url;
+
+ if ($page > 1) {
+ $search_url .= "&start=" . $page*$num; # page number
+ $search_url .= "&num=" . $num; #images per page
+ }
+
+ my ($search_hit_count, @subpages) =
+ pick_from_search_engine ($timeout, $search_url, '');
+
+ my @candidates = ();
+ my %referers;
+ foreach my $u (@subpages) {
+ next if ($u =~ m@[/.]google\.com\b@i); # skip google builtins (most links)
+ next unless ($u =~ m@jpg/image.jpg\?r=@i); # All pics contain this
+
+ LOG ($verbose_filter, " candidate: $u");
+ push @candidates, $u;
+ $referers{$u} = $u;
+ }
+
+ @candidates = depoison (@candidates);
+ return () if ($#candidates < 0);
+ my $i = int(rand($#candidates+1));
+ my $img = $candidates[$i];
+ my $ref = $referers{$img};
+
+ LOG ($verbose_load, "picked image " . ($i+1) . ": $img (on $ref)");
+ return ($ref, $img);
+}
+
\f
############################################################################
#
"&q=";
# googleimgs
-sub pick_from_google_images {
- my ( $timeout, $words, $max_page ) = @_;
+sub pick_from_google_images($;$$) {
+ my ($timeout, $words, $max_page) = @_;
if (!defined($words)) {
$words = random_word; # only one word for Google
# googlenums
-sub pick_from_google_image_numbers {
- my ( $timeout ) = @_;
+sub pick_from_google_image_numbers($) {
+ my ($timeout) = @_;
my $max = 9999;
my $number = int(rand($max));
# googlephotos
-sub pick_from_google_image_photos {
- my ( $timeout ) = @_;
+sub pick_from_google_image_photos($) {
+ my ($timeout) = @_;
my $i = int(rand($#photomakers + 1));
my $fn = $photomakers[$i];
"&aqo=";
# avtext
-sub pick_from_alta_vista_text {
- my ( $timeout ) = @_;
+sub pick_from_alta_vista_text($) {
+ my ($timeout) = @_;
my $words = random_words(0);
my $page = (int(rand(9)) + 1);
"&cobrand=undefined" .
"&query=");
-sub pick_from_hotbot_text {
- my ( $timeout ) = @_;
+sub pick_from_hotbot_text($) {
+ my ($timeout) = @_;
$last_search = $hotbot_search_url; # for warnings
"&tab=web" .
"&query=";
-sub pick_from_lycos_text {
- my ( $timeout ) = @_;
+sub pick_from_lycos_text($) {
+ my ($timeout) = @_;
$last_search = $lycos_search_url; # for warnings
"&p=";
# yahoonews
-sub pick_from_yahoo_news_text {
- my ( $timeout ) = @_;
+sub pick_from_yahoo_news_text($) {
+ my ($timeout) = @_;
$last_search = $yahoo_news_url; # for warnings
my %lj_cache = (); # hash, for detecting dups
# livejournal
-sub pick_from_livejournal_images {
- my ( $timeout ) = @_;
+sub pick_from_livejournal_images($) {
+ my ($timeout) = @_;
$last_search = $livejournal_img_url; # for warnings
my $ircimages_url = "http://ircimages.com/";
# ircimages
-sub pick_from_ircimages {
- my ( $timeout ) = @_;
+sub pick_from_ircimages($) {
+ my ($timeout) = @_;
$last_search = $ircimages_url; # for warnings
return ($search_url, $img);
}
+\f
+############################################################################
+#
+# Pick images from Twitter's list of recently-posted images.
+#
+############################################################################
+
+my $twitter_img_url = "http://twitpic.com/public_timeline/feed.rss";
+
+# With most of our image sources, we get a random page and then select
+# from the images on it. However, in the case of Twitter, the page
+# of images tends to update slowly; so we'll remember the last N entries
+# on it and randomly select from those, to get a wider variety each time.
+
+my $twit_cache_size = 1000;
+my @twit_cache = (); # fifo, for ordering by age
+my %twit_cache = (); # hash, for detecting dups
+
+# twitter
+sub pick_from_twitter_images($) {
+ my ($timeout) = @_;
+
+ $last_search = $twitter_img_url; # for warnings
+
+ my ( $base, $body ) = get_document ($twitter_img_url, undef, $timeout);
+
+ # Update the cache.
+
+ if ($body) {
+ $body =~ s/\n/ /gs;
+ $body =~ s/(<item)\b/\n$1/gsi;
+
+ my @items = split (/\n/, $body);
+ shift @items;
+ foreach (@items) {
+ next unless (m@<link>([^<>]*)</link>@si);
+ my $page = html_unquote ($1);
+
+ $page =~ s@/$@@s;
+ $page .= '/full';
+
+ next if ($twit_cache{$page}); # already have it
+
+ LOG ($verbose_filter, " candidate: $page");
+ push @twit_cache, $page;
+ $twit_cache{$page} = $page;
+ }
+ }
+
+ # Pull from the cache.
+
+ return () if ($#twit_cache == -1);
+
+ my $n = $#twit_cache+1;
+ my $i = int(rand($n));
+ my $page = $twit_cache[$i];
+
+ # delete this one from @twit_cache and from %twit_cache.
+ #
+ @twit_cache = ( @twit_cache[0 .. $i-1],
+ @twit_cache[$i+1 .. $#twit_cache] );
+ delete $twit_cache{$page};
+
+ # Keep the size of the cache under the limit by nuking older entries
+ #
+ while ($#twit_cache >= $twit_cache_size) {
+ my $page = shift @twit_cache;
+ delete $twit_cache{$page};
+ }
+
+ ( $base, $body ) = get_document ($page, undef, $timeout);
+ my $img = undef;
+
+ foreach (split (/<img\s+/, $body)) {
+ my ($src) = m/\bsrc=[\"\'](.*?)[\"\']/si;
+ next unless $src;
+ next if m@/js/@s;
+ next if m@/images/@s;
+
+ $img = $src;
+
+ # Sometimes these images are hosted on twitpic, sometimes on Amazon.
+ if ($img =~ m@^/@) {
+ $base =~ s@^(https?://[^/]+)/.*@$1@s;
+ $img = $base . $img;
+ }
+ last;
+ }
+
+ if (!$img) {
+ LOG ($verbose_load, "no matching images on $page\n");
+ return ();
+ }
+
+ LOG ($verbose_load, "picked image " .($i+1) . "/$n: $img");
+
+ return ($page, $img);
+}
+
\f
############################################################################
#
my %flickr_cache = (); # hash, for detecting dups
-# flickr
-sub pick_from_flickr {
- my ( $timeout ) = @_;
+# flickr_recent
+sub pick_from_flickr_recent($) {
+ my ($timeout) = @_;
my $start = 16 * int(rand(100));
$page = html_unquote ($page);
$thumb = html_unquote ($thumb);
- next unless ($thumb =~ m@^http://photos\d*\.flickr\.com/@);
+ next unless ($thumb =~ m@^http://farm\d*\.static\.flickr\.com/@);
my $base = "http://www.flickr.com/";
$page =~ s@^/@$base@;
return ($page, $img);
}
+\f
+############################################################################
+#
+# Pick images from a random RSS feed on Flickr.
+#
+############################################################################
+
+my $flickr_rss_base = ("http://www.flickr.com/services/feeds/photos_public.gne" .
+ "?format=rss_200_enc&tags=");
+
+# Picks a random RSS feed; picks a random image from that feed;
+# returns 2 URLs: the page containing the image, and the image.
+# Mostly by Joe Mcmahon <mcmahon@yahoo-inc.com>
+#
+# flickr_random
+sub pick_from_flickr_random($) {
+ my $timeout = shift;
+
+ my $rss = $flickr_rss_base . random_word();
+ $last_search = $rss;
+
+ print STDERR "\n\n" if ($verbose_load);
+ LOG ($verbose_load, "URL: $last_search");
+
+ $suppress_audit = 1;
+
+ my ( $base, $body ) = get_document ($last_search, undef, $timeout);
+ if (!$base || !$body) {
+ $body = undef;
+ return;
+ }
+
+ my $img;
+ ($base, $img) = pick_image_from_rss ($base, $body);
+ $body = undef;
+ return () unless defined ($img);
+
+ LOG ($verbose_load, "redirected to: $base");
+ return ($base, $img);
+}
+
\f
############################################################################
#
############################################################################
# driftnet
-sub pick_from_driftnet {
- my ( $timeout ) = @_;
+sub pick_from_driftnet($) {
+ my ($timeout) = @_;
my $id = $driftnet_magic;
my $dir = $driftnet_dir;
}
-sub get_driftnet_file {
+sub get_driftnet_file($) {
my ($file) = @_;
error ("\$driftnet_dir unset?") unless ($driftnet_dir);
}
-sub spawn_driftnet {
+sub spawn_driftnet($) {
my ($cmd) = @_;
# make a directory to use.
unless (1 == kill (0, $pid));
}
+# local-directory
+sub pick_from_local_dir {
+ my ( $timeout ) = @_;
+
+ my $id = $local_magic;
+ $last_search = $id;
+
+ my $dir = $local_dir;
+ error ("\$local_dir unset?") unless ($dir);
+ $dir =~ s@/+$@@;
+
+ error ("$dir unreadable") unless (-d "$dir/.");
+
+ my $v = ($verbose_exec ? "-v" : "");
+ my $pick = `xscreensaver-getimage-file $v "$dir"`;
+
+ LOG ($verbose_load, "picked file $pick ($id)");
+ return ($id, $pick);
+}
+
+
+sub get_local_file {
+ my ($file) = @_;
+
+ error ("\$local_dir unset?") unless ($local_dir);
+
+ my $id = $local_magic;
+ my $re = qr/$local_dir/;
+ error ("$id: $file not in $local_dir?")
+ unless ($file =~ m@^$re@o);
+
+ local *IN;
+ open (IN, $file) || error ("$id: $file: $!");
+ my $body = '';
+ while (<IN>) { $body .= $_; }
+ close IN || error ("$id: $file: $!");
+ return ($id, $body);
+}
+
+
\f
############################################################################
#
# Returns () if nothing found this time.
#
-sub pick_image {
- my ( $timeout ) = @_;
+sub pick_image(;$) {
+ my ($timeout) = @_;
$current_state = "select";
$load_method = "none";
#
############################################################################
-sub timestr {
+sub timestr() {
return strftime ("%H:%M:%S: ", localtime);
}
-sub blurb {
+sub blurb() {
return "$progname: " . timestr() . "$current_state: ";
}
-sub error {
+sub error($) {
my ($err) = @_;
print STDERR blurb() . "$err\n";
exit 1;
}
-sub stacktrace {
+sub stacktrace() {
my $i = 1;
print STDERR "$progname: stack trace:\n";
while (1) {
my $lastlog = "";
-sub clearlog {
+sub clearlog() {
$lastlog = "";
}
-sub showlog {
+sub showlog() {
my $head = "$progname: DEBUG: ";
foreach (split (/\n/, $lastlog)) {
print STDERR "$head$_\n";
$lastlog = "";
}
-sub LOG {
+sub LOG($$) {
my ($print, $msg) = @_;
my $blurb = timestr() . "$current_state: ";
$lastlog .= "$blurb$msg\n";
my %stats_elapsed;
my $last_state = undef;
-sub record_attempt {
+sub record_attempt($) {
my ($name) = @_;
if ($last_state) {
$suppress_audit = 0;
}
-sub record_success {
+sub record_success($$$) {
my ($name, $url, $base) = @_;
if (defined($stats_successes{$name})) {
$stats_successes{$name}++;
}
-sub record_failure {
+sub record_failure($) {
my ($name) = @_;
return if $image_succeeded;
-sub stats_of {
+sub stats_of($) {
my ($name) = @_;
my $i = $stats_successes{$name};
my $j = $stats_attempts{$name};
my $current_start_time = 0;
-sub start_timer {
+sub start_timer($) {
my ($name) = @_;
$current_start_time = time;
}
}
-sub stop_timer {
+sub stop_timer($$) {
my ($name, $success) = @_;
$stats_elapsed{$name} += time - $current_start_time;
}
my $last_report_time = 0;
-sub report_performance {
+sub report_performance() {
return unless $verbose_warnings;
my $suc = $stats_successes{$name} || 0;
my $pct = int($suc * 100 / $try);
my $secs = $stats_elapsed{$name};
- my $secs_link = int($secs / $try);
- print STDERR sprintf ("$blurb %-12s %4s (%d/%d);\t %2d secs/link\n",
+ my $secs_link = $secs / $try;
+ print STDERR sprintf ("$blurb %-14s %4s (%d/%d);" .
+ " \t %.1f secs/link\n",
"$name:", "$pct%", $suc, $try, $secs_link);
}
}
my @recent_images = ();
my @recent_sites = ();
-sub save_recent_url {
+sub save_recent_url($$) {
my ($url, $base) = @_;
return unless ($verbose_warnings);
my ($site) = m@^http://([^ \t\n\r/:]+)@;
return unless defined ($site);
- if ($base eq $driftnet_magic) {
- $site = $driftnet_magic;
+ if ($base eq $driftnet_magic || $base eq $local_magic) {
+ $site = $base;
@recent_images = ();
}
# Does %-decoding.
#
-sub url_decode {
+sub url_decode($) {
($_) = @_;
tr/+/ /;
s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg;
# Given the raw body of a GIF document, returns the dimensions of the image.
#
-sub gif_size {
+sub gif_size($) {
my ($body) = @_;
my $type = substr($body, 0, 6);
my $s;
return () unless ($type =~ /GIF8[7,9]a/);
$s = substr ($body, 6, 10);
my ($a,$b,$c,$d) = unpack ("C"x4, $s);
+ return () unless defined ($d);
return (($b<<8|$a), ($d<<8|$c));
}
# Given the raw body of a JPEG document, returns the dimensions of the image.
#
-sub jpeg_size {
+sub jpeg_size($) {
my ($body) = @_;
my $i = 0;
my $L = length($body);
# Given the raw body of a PNG document, returns the dimensions of the image.
#
-sub png_size {
+sub png_size($) {
my ($body) = @_;
return () unless ($body =~ m/^\211PNG\r/);
my ($bits) = ($body =~ m/^.{12}(.{12})/s);
# Given the raw body of a GIF, JPEG, or PNG document, returns the dimensions
# of the image.
#
-sub image_size {
+sub image_size($) {
my ($body) = @_;
my ($w, $h) = gif_size ($body);
if ($w && $h) { return ($w, $h); }
# returns the full path of the named program, or undef.
#
-sub which {
+sub which($) {
my ($prog) = @_;
foreach (split (/:/, $ENV{PATH})) {
if (-x "$_/$prog") {
# Like rand(), but chooses numbers with a bell curve distribution.
-sub bellrand {
+sub bellrand(;$) {
($_) = @_;
$_ = 1.0 unless defined($_);
$_ /= 3.0;
}
-sub exit_cleanup {
+sub exit_cleanup() {
x_cleanup();
print STDERR "$progname: exiting\n" if ($verbose_warnings);
if (@pids_to_kill) {
}
}
-sub signal_cleanup {
+sub signal_cleanup($) {
my ($sig) = @_;
print STDERR blurb() . (defined($sig)
? "caught signal $sig."
#
##############################################################################
-sub url_only_output {
+sub url_only_output() {
do {
my ($base, $img) = pick_image;
if ($img) {
my $delay = 2;
-sub x_cleanup {
+sub x_cleanup() {
unlink $image_ppm, $image_tmp1, $image_tmp2;
unlink $imagemap_html_tmp, $imagemap_jpg_tmp
if (defined ($imagemap_html_tmp));
# Like system, but prints status about exit codes, and kills this process
# with whatever signal killed the sub-process, if any.
#
-sub nontrapping_system {
+sub nontrapping_system(@) {
$! = 0;
$_ = join(" ", @_);
# writes a PPM to the given output file. Returns the width/height of the
# image if successful.
#
-sub image_to_pnm {
+sub image_to_pnm($$$) {
my ($url, $body, $output) = @_;
my ($cmd, $cmd2, $w, $h);
}
}
-sub pick_root_displayer {
+
+# Same as the "ppmmake" command: creates a solid-colored PPM.
+# Does not understand the rgb.txt color names except "black" and "white".
+#
+sub ppmmake($$$$) {
+ my ($outfile, $bgcolor, $w, $h) = @_;
+
+ my ($r, $g, $b);
+ if ($bgcolor =~ m/^\#?([\dA-F][\dA-F])([\dA-F][\dA-F])([\dA-F][\dA-F])$/i ||
+ $bgcolor =~ m/^\#?([\dA-F])([\dA-F])([\dA-F])$/i) {
+ ($r, $g, $b) = (hex($1), hex($2), hex($3));
+ } elsif ($bgcolor =~ m/^black$/i) {
+ ($r, $g, $b) = (0, 0, 0);
+ } elsif ($bgcolor =~ m/^white$/i) {
+ ($r, $g, $b) = (0xFF, 0xFF, 0xFF);
+ } else {
+ error ("unparsable color name: $bgcolor");
+ }
+
+ my $pixel = pack('CCC', $r, $g, $b);
+ my $bits = "P6\n$w $h\n255\n" . ($pixel x ($w * $h));
+
+ local *OUT;
+ open (OUT, ">$outfile") || error ("$outfile: $!");
+ print OUT $bits;
+ close OUT;
+}
+
+
+sub pick_root_displayer() {
my @names = ();
+ if ($cocoa_p) {
+ # see "xscreensaver/hacks/webcollage-cocoa.m"
+ return "echo COCOA LOAD ";
+ }
+
foreach my $cmd (@root_displayers) {
$_ = $cmd;
my ($name) = m/^([^ ]+)/;
my $ppm_to_root_window_cmd = undef;
-sub x_or_pbm_output {
+sub x_or_pbm_output($) {
my ($window_id) = @_;
# Check for our helper program, to see whether we need to use PPM pipelines.
LOG (($verbose_pbm || $verbose_load), "no $_ program");
}
+ if ($cocoa_p && !defined ($webcollage_helper)) {
+ error ("webcollage-helper not found in Cocoa-mode!");
+ }
+
+
# make sure the various programs we execute exist, right up front.
#
- my @progs = ("ppmmake"); # always need this one
+ my @progs = ();
if (!defined($webcollage_helper)) {
# Only need these others if we don't have the helper.
# find a root-window displayer program.
#
- $ppm_to_root_window_cmd = pick_root_displayer();
+ if (!$no_output_p) {
+ $ppm_to_root_window_cmd = pick_root_displayer();
+ }
if (defined ($window_id)) {
error ("-window-id only works if xscreensaver-getimage is installed")
# Create the solid-colored base image.
#
- $_ = "ppmmake '$bgcolor' $img_width $img_height";
- LOG ($verbose_pbm, "creating base image: $_");
- nontrapping_system "$_ > $image_ppm";
+ LOG ($verbose_pbm, "creating base image: ${img_width}x${img_height}");
+ $_ = ppmmake ($image_ppm, $bgcolor, $img_width, $img_height);
# Paste the default background image in the middle of it.
#
}
}
-sub paste_image {
+sub paste_image($$$$) {
my ($base, $img, $body, $source) = @_;
$current_state = "paste";
# the next network retrieval, which is probably a better thing
# to do anyway.
#
- $cmd .= " &";
+ $cmd .= " &" unless ($cocoa_p);
$rc = nontrapping_system ($cmd);
}
-sub update_imagemap {
+sub update_imagemap($$$$$$$$) {
my ($url, $x, $y, $w, $h, $image_ppm, $image_width, $image_height) = @_;
$current_state = "imagemap";
}
-sub init_signals {
+# Figure out what the proxy server should be, either from environment
+# variables or by parsing the output of the (MacOS) program "scutil",
+# which tells us what the system-wide proxy settings are.
+#
+sub set_proxy() {
+
+ if (! $http_proxy) {
+ # historical suckage: the environment variable name is lower case.
+ $http_proxy = $ENV{http_proxy} || $ENV{HTTP_PROXY};
+ }
+
+ if (defined ($http_proxy)) {
+ if ($http_proxy && $http_proxy =~ m@^http://([^/]*)/?$@ ) {
+ # historical suckage: allow "http://host:port" as well as "host:port".
+ $http_proxy = $1;
+ }
+
+ } else {
+ my $proxy_data = `scutil --proxy 2>/dev/null`;
+ my ($server) = ($proxy_data =~ m/\bHTTPProxy\s*:\s*([^\s]+)/s);
+ my ($port) = ($proxy_data =~ m/\bHTTPPort\s*:\s*([^\s]+)/s);
+ # Note: this ignores the "ExceptionsList".
+ if ($server) {
+ $http_proxy = $server;
+ $http_proxy .= ":$port" if $port;
+ }
+ }
+
+ if ($http_proxy) {
+ LOG ($verbose_net, "proxy server: $http_proxy");
+ }
+}
+
+
+sub init_signals() {
$SIG{HUP} = \&signal_cleanup;
$SIG{INT} = \&signal_cleanup;
END { exit_cleanup(); }
-sub main {
+sub main() {
$| = 1;
srand(time ^ $$);
my $root_p = 0;
my $window_id = undef;
- # historical suckage: the environment variable name is lower case.
- $http_proxy = $ENV{http_proxy} || $ENV{HTTP_PROXY};
-
while ($_ = $ARGV[0]) {
shift @ARGV;
if ($_ eq "-display" ||
} elsif ($_ eq "-urls-only") {
$urls_only_p = 1;
$no_output_p = 1;
+ } elsif ($_ eq "-cocoa") {
+ $cocoa_p = 1;
} elsif ($_ eq "-imagemap") {
$imagemap_base = shift @ARGV;
$no_output_p = 1;
} else {
$driftnet_cmd = $default_driftnet_cmd;
}
+ } elsif ($_ eq "-directory" || $_ eq "--directory") {
+ @search_methods = ( 100, "local", \&pick_from_local_dir );
+ if (! ($ARGV[0] =~ m/^-/)) {
+ $local_dir = shift @ARGV;
+ } else {
+ error ("local directory path must be set")
+ }
} elsif ($_ eq "-debug" || $_ eq "--debug") {
my $which = shift @ARGV;
my @rest = @search_methods;
"\t\t [-filter cmd] [-filter2 cmd] [-background color]\n" .
"\t\t [-dictionary dictionary-file] [-http-proxy host[:port]]\n" .
"\t\t [-driftnet [driftnet-program-and-args]]\n" .
+ "\t\t [-directory local-image-directory]\n" .
"\n";
exit 1;
}
}
- if ($http_proxy && $http_proxy eq "") {
- $http_proxy = undef;
- }
- if ($http_proxy && $http_proxy =~ m@^http://([^/]*)/?$@ ) {
- # historical suckage: allow "http://host:port" as well as "host:port".
- $http_proxy = $1;
- }
-
- if (!$root_p && !$no_output_p) {
+ if (!$root_p && !$no_output_p && !$cocoa_p) {
print STDERR $copyright;
error "the -root argument is mandatory (for now.)";
}
- if (!$no_output_p && !$ENV{DISPLAY}) {
+ if (!$no_output_p && !$cocoa_p && !$ENV{DISPLAY}) {
error "\$DISPLAY is not set.";
}
pick_dictionary();
}
+ if ($imagemap_base && !($img_width && $img_height)) {
+ error ("-size WxH is required with -imagemap");
+ }
+
+ if (defined ($local_dir)) {
+ $_ = "xscreensaver-getimage-file";
+ which ($_) || error "$_ not found on \$PATH.";
+ }
+
init_signals();
+ set_proxy();
spawn_driftnet ($driftnet_cmd) if ($driftnet_cmd);
}
}
-main;
+main();
exit (0);