#!/usr/bin/perl -w
#
-# webcollage, Copyright (c) 1999-2005 by Jamie Zawinski <jwz@jwz.org>
+# webcollage, Copyright © 1999-2013 by Jamie Zawinski <jwz@jwz.org>
# This program decorates the screen with random images from the web.
# One satisfied customer described it as "a nonstop pop culture brainbath."
#
#use diagnostics;
-use Socket;
require Time::Local;
require POSIX;
use Fcntl ':flock'; # import LOCK_* constants
use POSIX qw(strftime);
-
-use bytes; # Larry can take Unicode and shove it up his ass sideways.
- # Perl 5.8.0 causes us to start getting incomprehensible
- # errors about UTF-8 all over the place without this.
+use LWP::UserAgent;
+use bytes;
my $progname = $0; $progname =~ s@.*/@@g;
-my $version = q{ $Revision: 1.140 $ }; $version =~ s/^[^0-9]+([0-9.]+).*$/$1/;
-my $copyright = "WebCollage $version, Copyright (c) 1999-2005" .
+my $version = q{ $Revision: 1.162 $ }; $version =~ s/^[^0-9]+([0-9.]+).*$/$1/;
+my $copyright = "WebCollage $version, Copyright (c) 1999-2013" .
" Jamie Zawinski <jwz\@jwz.org>\n" .
" http://www.jwz.org/webcollage/\n";
-my @search_methods = ( 20, "googlephotos", \&pick_from_google_image_photos,
- 11, "googleimgs", \&pick_from_google_images,
- 11, "googlenums", \&pick_from_google_image_numbers,
- 20, "altavista", \&pick_from_alta_vista_random_link,
- 13, "flickr_recent", \&pick_from_flickr_recent,
- 10, "flickr_random", \&pick_from_flickr_random,
- 9, "livejournal", \&pick_from_livejournal_images,
- 6, "yahoorand", \&pick_from_yahoo_random_link,
+my @search_methods = (
+ # Google is rate-limiting us now, so this works ok from
+ # a short-running screen saver, but not as a batch job.
+ # I haven't found a workaround.
+ #
+ 7, "googlephotos", \&pick_from_google_image_photos,
+ 5, "googleimgs", \&pick_from_google_images,
+ 5, "googlenums", \&pick_from_google_image_numbers,
+
+ # So let's try Bing instead. No rate limiting yet!
+ #
+ 7, "bingphotos", \&pick_from_bing_image_photos,
+ 6, "bingimgs", \&pick_from_bing_images,
+ 6, "bingnums", \&pick_from_bing_image_numbers,
+
+ 19, "flickr_recent", \&pick_from_flickr_recent,
+ 15, "flickr_random", \&pick_from_flickr_random,
+ 20, "instagram", \&pick_from_instagram,
+ 6, "livejournal", \&pick_from_livejournal_images,
+ 4, "yahoorand", \&pick_from_yahoo_random_link,
+
+ # Twitter destroyed their whole API in 2013.
+ # 0, "twitpic", \&pick_from_twitpic_images,
+ # 0, "twitter", \&pick_from_twitter_images,
  # This is a cute way to search for certain webcams.
  # Not included in default methods, since these images
  # aren't terribly interesting by themselves.
  #
0, "securitycam", \&pick_from_security_camera,
+ # Nonfunctional as of June 2011.
+ # 0, "altavista", \&pick_from_alta_vista_random_link,
+
# In Apr 2002, Google asked me to stop searching them.
# I asked them to add a "random link" url. They said
# "that would be easy, we'll think about it" and then
# it's no longer possible to do "or" searches on news
# images, so we rarely get any hits there any more.
#
- # 0, "yahoonews", \&pick_from_yahoo_news_text,
+ # 0, "yahoonews", \&pick_from_yahoo_news_text,
# Dec 2004: the ircimages guy's server can't take the
# heat, so he started banning the webcollage user agent.
# I tried to convince him to add a lighter-weight page to
# support webcollage better, but he doesn't care.
#
- # 0, "ircimages", \&pick_from_ircimages,
+ # 0, "ircimages", \&pick_from_ircimages,
# Dec 2002: Alta Vista has a new "random link" URL now.
# They added it specifically to better support webcollage!
# That was super cool of them. This is how we used to do
# it, before:
#
- # 0, "avimages", \&pick_from_alta_vista_images,
- # 0, "avtext", \&pick_from_alta_vista_text,
+ # 0, "avimages", \&pick_from_alta_vista_images,
+ # 0, "avtext", \&pick_from_alta_vista_text,
# This broke in 2004. Eh, Lycos sucks anyway.
#
- # 0, "lycos", \&pick_from_lycos_text,
+ # 0, "lycos", \&pick_from_lycos_text,
# This broke in 2003, I think. I suspect Hotbot is
# actually the same search engine data as Lycos.
#
- # 0, "hotbot", \&pick_from_hotbot_text,
+ # 0, "hotbot", \&pick_from_hotbot_text,
);
# programs we can use to write to the root window (tried in ascending order.)
# (I don't see how they did it, though!)
"alwayshotels.com" => 1, # Poisoned Lycos pretty heavily.
"nextag.com" => 1, # Poisoned Alta Vista real good.
+ "ghettodriveby.com" => 1, # Poisoned Google Images.
+ "crosswordsolver.org" => 1, # Poisoned Google Images.
+ "xona.com" => 1, # Poisoned Google Images.
+ "freepatentsonline.com" => 1, # Poisoned Google Images.
+ "herbdatanz.com" => 1, # Poisoned Google Images.
);
# site" diagnostic message.
#
my %warningless_sites = (
- "home.earthlink.net" => 1, # Lots of home pages here.
- "www.geocities.com" => 1,
+ "home.earthlink.net" => 1,
"www.angelfire.com" => 1,
"members.aol.com" => 1,
"img.photobucket.com" => 1,
"pics.livejournal.com" => 1,
"tinypic.com" => 1,
"flickr.com" => 1,
+ "staticflickr.com" => 1,
+ "pbase.com" => 1,
+ "blogger.com" => 1,
+ "multiply.com" => 1,
+ "wikimedia.org" => 1,
+ "twitpic.com" => 1,
+ "amazonaws.com" => 1,
+ "blogspot.com" => 1,
+ "photoshelter.com" => 1,
+ "myspacecdn.com" => 1,
+ "feedburner.com" => 1,
+ "wikia.com" => 1,
+ "ljplus.ru" => 1,
+ "yandex.ru" => 1,
+ "imgur.com" => 1,
+ "yfrog.com" => 1,
"yimg.com" => 1, # This is where dailynews.yahoo.com stores
"eimg.com" => 1, # its images, so pick_from_yahoo_news_text()
#
my %entity_table = (
    "apos"   => '\'',
-   "quot"   => '"', "amp" => '&', "lt" => '<', "gt" => '>',
-   "nbsp"   => ' ', "iexcl" => '¡', "cent" => '¢', "pound" => '£',
-   "curren" => '¤', "yen" => '¥', "brvbar" => '¦', "sect" => '§',
-   "uml"    => '¨', "copy" => '©', "ordf" => 'ª', "laquo" => '«',
-   "not"    => '¬', "shy" => '', "reg" => '®', "macr" => '¯',
-   "deg"    => '°', "plusmn" => '±', "sup2" => '²', "sup3" => '³',
-   "acute"  => '´', "micro" => 'µ', "para" => '¶', "middot" => '·',
-   "cedil"  => '¸', "sup1" => '¹', "ordm" => 'º', "raquo" => '»',
-   "frac14" => '¼', "frac12" => '½', "frac34" => '¾', "iquest" => '¿',
-   "Agrave" => 'À', "Aacute" => 'Á', "Acirc" => 'Â', "Atilde" => 'Ã',
-   "Auml"   => 'Ä', "Aring" => 'Å', "AElig" => 'Æ', "Ccedil" => 'Ç',
-   "Egrave" => 'È', "Eacute" => 'É', "Ecirc" => 'Ê', "Euml" => 'Ë',
-   "Igrave" => 'Ì', "Iacute" => 'Í', "Icirc" => 'Î', "Iuml" => 'Ï',
-   "ETH"    => 'Ð', "Ntilde" => 'Ñ', "Ograve" => 'Ò', "Oacute" => 'Ó',
-   "Ocirc"  => 'Ô', "Otilde" => 'Õ', "Ouml" => 'Ö', "times" => '×',
-   "Oslash" => 'Ø', "Ugrave" => 'Ù', "Uacute" => 'Ú', "Ucirc" => 'Û',
-   "Uuml"   => 'Ü', "Yacute" => 'Ý', "THORN" => 'Þ', "szlig" => 'ß',
-   "agrave" => 'à', "aacute" => 'á', "acirc" => 'â', "atilde" => 'ã',
-   "auml"   => 'ä', "aring" => 'å', "aelig" => 'æ', "ccedil" => 'ç',
-   "egrave" => 'è', "eacute" => 'é', "ecirc" => 'ê', "euml" => 'ë',
-   "igrave" => 'ì', "iacute" => 'í', "icirc" => 'î', "iuml" => 'ï',
-   "eth"    => 'ð', "ntilde" => 'ñ', "ograve" => 'ò', "oacute" => 'ó',
-   "ocirc"  => 'ô', "otilde" => 'õ', "ouml" => 'ö', "divide" => '÷',
-   "oslash" => 'ø', "ugrave" => 'ù', "uacute" => 'ú', "ucirc" => 'û',
-   "uuml"   => 'ü', "yacute" => 'ý', "thorn" => 'þ', "yuml" => 'ÿ',
-   "ndash"  => '-', "mdash" => "--"
+   # NOTE(review): values are written as "\xNN" escapes rather than raw
+   # Latin-1 literals, presumably to stay byte-exact under "use bytes"
+   # above -- confirm.
+   "quot"   => '"',    "amp"    => '&',    "lt"     => '<',
+   # was garbled to '' in this hunk; every sibling entity maps to its
+   # Latin-1 code point, and iexcl (inverted !) is \xA1.
+   "gt"     => '>',    "nbsp"   => ' ',    "iexcl"  => "\xA1",
+   "cent"   => "\xA2", "pound"  => "\xA3", "curren" => "\xA4",
+   "yen"    => "\xA5", "brvbar" => "\xA6", "sect"   => "\xA7",
+   "uml"    => "\xA8", "copy"   => "\xA9", "ordf"   => "\xAA",
+   "laquo"  => "\xAB", "not"    => "\xAC", "shy"    => "\xAD",
+   "reg"    => "\xAE", "macr"   => "\xAF", "deg"    => "\xB0",
+   "plusmn" => "\xB1", "sup2"   => "\xB2", "sup3"   => "\xB3",
+   "acute"  => "\xB4", "micro"  => "\xB5", "para"   => "\xB6",
+   "middot" => "\xB7", "cedil"  => "\xB8", "sup1"   => "\xB9",
+   "ordm"   => "\xBA", "raquo"  => "\xBB", "frac14" => "\xBC",
+   "frac12" => "\xBD", "frac34" => "\xBE", "iquest" => "\xBF",
+   "Agrave" => "\xC0", "Aacute" => "\xC1", "Acirc"  => "\xC2",
+   "Atilde" => "\xC3", "Auml"   => "\xC4", "Aring"  => "\xC5",
+   "AElig"  => "\xC6", "Ccedil" => "\xC7", "Egrave" => "\xC8",
+   "Eacute" => "\xC9", "Ecirc"  => "\xCA", "Euml"   => "\xCB",
+   "Igrave" => "\xCC", "Iacute" => "\xCD", "Icirc"  => "\xCE",
+   "Iuml"   => "\xCF", "ETH"    => "\xD0", "Ntilde" => "\xD1",
+   "Ograve" => "\xD2", "Oacute" => "\xD3", "Ocirc"  => "\xD4",
+   "Otilde" => "\xD5", "Ouml"   => "\xD6", "times"  => "\xD7",
+   "Oslash" => "\xD8", "Ugrave" => "\xD9", "Uacute" => "\xDA",
+   "Ucirc"  => "\xDB", "Uuml"   => "\xDC", "Yacute" => "\xDD",
+   "THORN"  => "\xDE", "szlig"  => "\xDF", "agrave" => "\xE0",
+   "aacute" => "\xE1", "acirc"  => "\xE2", "atilde" => "\xE3",
+   "auml"   => "\xE4", "aring"  => "\xE5", "aelig"  => "\xE6",
+   "ccedil" => "\xE7", "egrave" => "\xE8", "eacute" => "\xE9",
+   "ecirc"  => "\xEA", "euml"   => "\xEB", "igrave" => "\xEC",
+   "iacute" => "\xED", "icirc"  => "\xEE", "iuml"   => "\xEF",
+   "eth"    => "\xF0", "ntilde" => "\xF1", "ograve" => "\xF2",
+   "oacute" => "\xF3", "ocirc"  => "\xF4", "otilde" => "\xF5",
+   "ouml"   => "\xF6", "divide" => "\xF7", "oslash" => "\xF8",
+   "ugrave" => "\xF9", "uacute" => "\xFA", "ucirc"  => "\xFB",
+   "uuml"   => "\xFC", "yacute" => "\xFD", "thorn"  => "\xFE",
+   "yuml"   => "\xFF",
+
+   # HTML 4 entities that do not have 1:1 Latin1 mappings.
+   "bull"  => "*",   "hellip"=> "...", "prime" => "'",  "Prime" => "\"",
+   "frasl" => "/",   "trade" => "[tm]", "larr" => "<-", "rarr"  => "->",
+   "harr"  => "<->", "lArr"  => "<=",  "rArr"  => "=>", "hArr"  => "<=>",
+   "empty" => "\xD8", "minus" => "-",  "lowast"=> "*",  "sim"   => "~",
+   "cong"  => "=~",  "asymp" => "~",   "ne"    => "!=", "equiv" => "==",
+   "le"    => "<=",  "ge"    => ">=",  "lang"  => "<",  "rang"  => ">",
+   "loz"   => "<>",  "OElig" => "OE",  "oelig" => "oe", "Yuml"  => "Y",
+   "circ"  => "^",   "tilde" => "~",   "ensp"  => " ",  "emsp"  => " ",
+   "thinsp"=> " ",   "ndash" => "-",   "mdash" => "--", "lsquo" => "`",
+   "rsquo" => "'",   "sbquo" => "'",   "ldquo" => "\"", "rdquo" => "\"",
+   "bdquo" => "\"",  "lsaquo"=> "<",   "rsaquo"=> ">",
);
if (!defined($timeout)) { $timeout = $http_timeout; }
if ($timeout > $http_timeout) { $timeout = $http_timeout; }
- if ($timeout <= 0) {
- LOG (($verbose_net || $verbose_load), "timed out for $url");
- return ();
- }
+ my $user_agent = "$progname/$version";
- LOG ($verbose_net, "get_document_1 $url " . ($referer ? $referer : ""));
+ if ($url =~ m@^http://www\.altavista\.com/@ ||
+ $url =~ m@^http://random\.yahoo\.com/@ ||
+ $url =~ m@^http://images\.google\.com/@ ||
+ $url =~ m@^http://www\.google\.com/@) {
+ # block this, you turkeys.
+ $user_agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.7)" .
+ " Gecko/20070914 Firefox/2.0.0.7";
- if (! ($url =~ m@^http://@i)) {
- LOG ($verbose_net, "not an HTTP URL: $url");
- return ();
+ # 28-Jun-2007: Google Images now emits the entire page in JS if
+ # you claim to be Gecko. They also still block "webcollage".
+ # They serve non-JS for unrecognised agents, so let's try this...
+ $user_agent = "NoJavascriptPlease/1.0"
+ if ($url =~ m@^http://[a-z]+\.google\.com/@);
}
- my ($url_proto, $dummy, $serverstring, $path) = split(/\//, $url, 4);
- $path = "" unless $path;
+ my $ua = LWP::UserAgent->new;
+ $ua->env_proxy();
+ $ua->agent ("$progname/$version");
+ $ua->default_header ('Referer' => $referer);
+ $ua->timeout($timeout) if $timeout;
- if (!$url_proto || !$serverstring) {
- LOG (($verbose_net || $verbose_load), "unparsable URL: $url");
- return ();
- }
-
- my ($them,$port) = split(/:/, $serverstring);
- $port = 80 unless $port;
-
- my $them2 = $them;
- my $port2 = $port;
- if ($http_proxy) {
- $serverstring = $http_proxy if $http_proxy;
- $serverstring =~ s@^[a-z]+://@@;
- ($them2,$port2) = split(/:/, $serverstring);
- $port2 = 80 unless $port2;
- }
-
- my ($remote, $iaddr, $paddr, $proto, $line);
- $remote = $them2;
- if ($port2 =~ /\D/) { $port2 = getservbyname($port2, 'tcp') }
- if (!$port2) {
- LOG (($verbose_net || $verbose_load), "unrecognised port in $url");
- return ();
- }
- $iaddr = inet_aton($remote);
- if (!$iaddr) {
- LOG (($verbose_net || $verbose_load), "host not found: $remote");
- return ();
+ if ($verbose_http) {
+ LOG (1, " ==> GET $url");
+ LOG (1, " ==> User-Agent: $user_agent");
+ LOG (1, " ==> Referer: $referer") if $referer;
}
- $paddr = sockaddr_in($port2, $iaddr);
+ my $res = $ua->get ($url);
- my $head = "";
- my $body = "";
+ my $http = ($res ? $res->status_line : '') || '';
+ my $head = ($res ? $res->headers()->as_string : '') || '';
+ my $body = ($res && $res->is_success ? $res->decoded_content : '') || '';
- @_ =
- eval {
- local $SIG{ALRM} = sub {
- LOG (($verbose_net || $verbose_load), "timed out ($timeout) for $url");
- die "alarm\n";
- };
- alarm $timeout;
-
- $proto = getprotobyname('tcp');
- if (!socket(S, PF_INET, SOCK_STREAM, $proto)) {
- LOG (($verbose_net || $verbose_load), "socket: $!");
- return ();
- }
- if (!connect(S, $paddr)) {
- LOG (($verbose_net || $verbose_load), "connect($serverstring): $!");
- return ();
- }
-
- select(S); $| = 1; select(STDOUT);
-
- my $cookie = $cookies{$them};
-
- my $user_agent = "$progname/$version";
-
- if ($url =~ m@^http://www\.altavista\.com/@ ||
- $url =~ m@^http://random\.yahoo\.com/@ ||
- $url =~ m@^http://images\.google\.com/@ ||
- $url =~ m@^http://www\.google\.com/@) {
- # block this, you turkeys.
- $user_agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.7)" .
- " Gecko/20070914 Firefox/2.0.0.7";
-
- # 28-Jun-2007: Google Images now emits the entire page in JS if
- # you claim to be Gecko. They also still block "webcollage".
- # They serve non-JS for unrecognised agents, so let's try this...
- $user_agent = "NoJavascriptPlease/1.0"
- if ($url =~ m@^http://[a-z]+\.google\.com/@);
- }
-
- my $hdrs = "GET " . ($http_proxy ? $url : "/$path") . " HTTP/1.0\r\n" .
- "Host: $them\r\n" .
- "User-Agent: $user_agent\r\n";
- if ($referer) {
- $hdrs .= "Referer: $referer\r\n";
- }
- if ($cookie) {
- my @cc = split(/\r?\n/, $cookie);
- $hdrs .= "Cookie: " . join('; ', @cc) . "\r\n";
- }
- $hdrs .= "\r\n";
-
- foreach (split('\r?\n', $hdrs)) {
- LOG ($verbose_http, " ==> $_");
- }
- print S $hdrs;
- my $http = <S> || "";
-
- # Kludge: the Yahoo Random Link is now returning as its first
- # line "Status: 301" instead of "HTTP/1.0 301 Found". Fix it...
- #
- $http =~ s@^Status:\s+(\d+)\b@HTTP/1.0 $1@i;
+ LOG ($verbose_net, "get_document_1 $url " . ($referer ? $referer : ""));
- $_ = $http;
- s/[\r\n]+$//s;
+ $head =~ s/\r\n/\n/gs;
+ $head =~ s/\r/\n/gs;
+ if ($verbose_http) {
+ foreach (split (/\n/, $head)) {
LOG ($verbose_http, " <== $_");
-
- while (<S>) {
- $head .= $_;
- s/[\r\n]+$//s;
- last if m@^$@;
- LOG ($verbose_http, " <== $_");
-
- if (m@^Set-cookie:\s*([^;\r\n]+)@i) {
- set_cookie($them, $1)
- }
- }
-
- my $lines = 0;
- while (<S>) {
- $body .= $_;
- $lines++;
- }
-
- LOG ($verbose_http,
- " <== [ body ]: $lines lines, " . length($body) . " bytes");
-
- close S;
-
- if (!$http) {
- LOG (($verbose_net || $verbose_load), "null response: $url");
- return ();
- }
-
- $SIG{ALRM} = 'DEFAULT'; # seem to be suffering a race?
- return ( $http, $head, $body );
- };
- die if ($@ && $@ ne "alarm\n"); # propagate errors
-
- if ($@ && $@ ne "alarm\n") {
- print STDERR blurb() . "DIE " . join(" ", $@) . "\n";
- die;
+ }
}
- if ($@) {
- # timed out
- $head = undef;
- $body = undef;
- $suppress_audit = 1;
+ my @L = split(/\r\n|\r|\n/, $body);
+ my $lines = @L;
+ LOG ($verbose_http,
+ " <== [ body ]: $lines lines, " . length($body) . " bytes");
+
+ if (!$http) {
+ LOG (($verbose_net || $verbose_load), "null response: $url");
return ();
- } else {
- # didn't
- alarm 0;
- return @_;
}
+
+ return ( $http, $head, $body );
}
my %unique_urls;
foreach (split(/ *</)) {
- if ( m/^meta /i ) {
+ if ( m/^meta.*["']keywords["']/i ) {
# Likewise, reject any web pages that have a KEYWORDS meta tag
# that is too long.
#
- if (m/name ?= ?\"?keywords\"?/i &&
- m/content ?= ?\"([^\"]+)\"/) {
- my $L = length($1);
- if ($L > 1000) {
- LOG (($verbose_filter || $verbose_load),
- "excessive keywords ($L bytes) in $url: rejecting.");
- $rejected_urls{$url} = $L;
- $body = undef;
- $_ = undef;
- return ();
- } else {
- LOG ($verbose_filter, " keywords ($L bytes) in $url (ok)");
- }
+ my $L = length($_);
+ if ($L > 1000) {
+ LOG (($verbose_filter || $verbose_load),
+ "excessive keywords ($L bytes) in $url: rejecting.");
+ $rejected_urls{$url} = $L;
+ $body = undef;
+ $_ = undef;
+ return ();
+ } else {
+ LOG ($verbose_filter, " keywords ($L bytes) in $url (ok)");
}
- } elsif ( m/^(img|a) .*(src|href) ?= ?\"? ?(.*?)[ >\"]/io ) {
+ } elsif (m/^ (IMG|A) \b .* (SRC|HREF) \s* = \s* ["']? (.*?) [ "'<>] /six ||
+ m/^ (LINK|META) \b .* (REL|PROPERTY) \s* = \s*
+ ["']? (image_src|og:image) ["']? /six) {
- my $was_inline = (! ( "$1" eq "a" || "$1" eq "A" ));
+ my $was_inline = (lc($1) eq 'img');
+ my $was_meta = (lc($1) eq 'link' || lc($1) eq 'meta');
my $link = $3;
+
+ # For <link rel="image_src" href="...">
+ # and <meta property="og:image" content="...">
+ #
+ if ($was_meta) {
+ next unless (m/ (HREF|CONTENT) \s* = \s* ["']? (.*?) [ "'<>] /six);
+ $link = $2;
+ }
+
my ( $width ) = m/width ?=[ \"]*(\d+)/oi;
my ( $height ) = m/height ?=[ \"]*(\d+)/oi;
$_ = $link;
LOG ($verbose_filter,
" image $url" .
($width && $height ? " (${width}x${height})" : "") .
- ($was_inline ? " (inline)" : ""));
+ ($was_meta ? " (meta)" : $was_inline ? " (inline)" : ""));
- $urls[++$#urls] = $url;
- $unique_urls{$url}++;
- # JPEGs are preferable to GIFs and PNGs.
- $_ = $url;
- if ( ! m@[.](gif|png)$@io ) {
- $urls[++$#urls] = $url;
+ my $weight = 1;
+
+ if ($was_meta) {
+ $weight = 20; # meta tag images are far preferable to inline images.
+ } else {
+ if ($url !~ m@[.](gif|png)$@io ) {
+ $weight += 2; # JPEGs are preferable to GIFs and PNGs.
+ }
+ if (! $was_inline) {
+ $weight += 4; # pointers to images are preferable to inlined images.
+ }
}
- # pointers to images are preferable to inlined images.
- if ( ! $was_inline ) {
- $urls[++$#urls] = $url;
+ $unique_urls{$url}++;
+ for (my $i = 0; $i < $weight; $i++) {
$urls[++$#urls] = $url;
}
}
return $url;
}
+
# Given a URL and the RSS feed from that URL, pick a random image from
# the feed. This is a lot simpler than extracting images out of a page:
# we already know we have reasonable images, so we just pick one.
sub pick_dictionary() {
my @dicts = ("/usr/dict/words",
"/usr/share/dict/words",
- "/usr/share/lib/dict/words");
+ "/usr/share/lib/dict/words",
+ "/usr/share/dict/cracklib-small",
+ "/usr/share/dict/cracklib-words"
+ );
foreach my $f (@dicts) {
if (-f $f) {
$wordlist = $f;
#
sub random_word() {
- local *IN;
- if (! open (IN, "<$wordlist")) {
- return undef;
- }
+ return undef unless open (my $in, '<', $wordlist);
- my $size = (stat(IN))[7];
+ my $size = (stat($in))[7];
my $word = undef;
my $count = 0;
if (++$count > 100);
my $pos = int (rand ($size));
- if (seek (IN, $pos, 0)) {
- $word = <IN>; # toss partial line
- $word = <IN>; # keep next line
+ if (seek ($in, $pos, 0)) {
+ $word = <$in>; # toss partial line
+ $word = <$in>; # keep next line
}
next unless ($word);
last if ($word);
}
- close (IN);
+ close ($in);
if ( $word =~ s/\s/\+/gs ) { # convert intra-word spaces to "+".
$word = "\%22$word\%22"; # And put quotes (%22) around it.
sub random_words($) {
-  my ($or_p) = @_;
-  my $sep = ($or_p ? "%20OR%20" : "%20");
-  return (random_word . $sep .
-          random_word . $sep .
-          random_word . $sep .
-          random_word . $sep .
-          random_word);
+  # INTERFACE CHANGE: the argument is now the literal (URL-encoded)
+  # separator used to join five random words -- callers pass e.g.
+  # '%20' (space) or ',' (tag list) -- replacing the old boolean
+  # "use %20OR%20" flag.  Returns the joined query-string fragment.
+  my ($sep) = @_;
+  return (random_word() . $sep .
+          random_word() . $sep .
+          random_word() . $sep .
+          random_word() . $sep .
+          random_word());
}
my @subpages;
+ if ($body =~ m/^\{\"/s) { # Google AJAX JSON response.
+
+ my @chunks = split (/"GsearchResultClass"/, $body);
+ shift @chunks;
+ my $body2 = '';
+ my $n = 1;
+ foreach (@chunks) {
+ my ($img) = m/"unescapedUrl":"(.*?)"/si;
+ my ($url) = m/"originalContextUrl":"(.*?)"/si;
+ next unless ($img && $url);
+ $url = ("/imgres" .
+ "?imgurl=" . url_quote($img) .
+ "&imgrefurl=" . url_quote($url) .
+ "&...");
+ $body2 .= "<A HREF=\"" . html_quote($url) . "\">$n</A>\n";
+ $n++;
+ }
+ $body = $body2 if $body2;
+ }
+
my $search_count = "?";
if ($body =~ m@found (approximately |about )?(<B>)?(\d+)(</B>)? image@) {
$search_count = $3;
1 while ($search_count =~ s/^(\d+)(\d{3})/$1,$2/);
# if ($search_count eq "?" || $search_count eq "0") {
-# local *OUT;
# my $file = "/tmp/wc.html";
-# open(OUT, ">$file") || error ("writing $file: $!");
-# print OUT $body;
-# close OUT;
+# open (my $out, '>', $file) || error ("writing $file: $!");
+# print $out $body;
+# close $out;
# print STDERR blurb() . "###### wrote $file\n";
# }
my ($u) = m@<A\s.*\bHREF\s*=\s*([^>]+)>@i;
next unless $u;
- if ($u =~ m/^\"([^\"]*)\"/) { $u = $1; } # quoted string
- elsif ($u =~ m/^([^\s]*)\s/) { $u = $1; } # or token
+ if (m/\bm="{(.*?)}"/s) { # Bing info is inside JSON crud
+ my $json = html_unquote($1);
+ my ($href) = ($json =~ m/\bsurl:"(.*?)"/s);
+ my ($img) = ($json =~ m/\bimgurl:"(.*?)"/s);
+ $u = "$img\t$href" if ($img && $href);
+
+ } elsif ($u =~ m/^\"([^\"]*)\"/) { $u = $1 # quoted string
+ } elsif ($u =~ m/^([^\s]*)\s/) { $u = $1; # or token
+ }
if ( $rejected_urls{$u} ) {
LOG ($verbose_filter, " pre-rejecting candidate: $u");
############################################################################
-my $google_images_url = "http://images.google.com/images" .
- "?site=images" . # photos
- "&btnG=Search" . # graphics
- "&safe=off" . # no screening
- "&imgsafe=off" .
+my $google_images_url = "http://ajax.googleapis.com/ajax/services/" .
+ "search/images" .
+ "?v=1.0" .
+ "&rsz=large" .
"&q=";
# googleimgs
my ($timeout, $words, $max_page) = @_;
if (!defined($words)) {
- $words = random_word; # only one word for Google
+ $words = random_word(); # only one word for Google
}
- my $page = (int(rand(9)) + 1);
- my $num = 20; # 20 images per page
- my $search_url = $google_images_url . $words;
-
- if ($page > 1) {
- $search_url .= "&start=" . $page*$num; # page number
- $search_url .= "&num=" . $num; #images per page
- }
+ my $off = int(rand(40));
+ my $search_url = $google_images_url . $words . "&start=" . $off;
my ($search_hit_count, @subpages) =
pick_from_search_engine ($timeout, $search_url, $words);
next unless ($u =~ m@imgres\?imgurl@i); # All pics start with this
next if ($u =~ m@[/.]google\.com\b@i); # skip google builtins
- if ($u =~ m@^/imgres\?imgurl=(.*?)\&imgrefurl=(.*?)\&@) {
+ $u = html_unquote($u);
+ if ($u =~ m@^/imgres\?imgurl=(.*?)&imgrefurl=(.*?)\&@) {
my $ref = $2;
my $img = $1;
- $img = "http://$img" unless ($img =~ m/^http:/i);
+ $ref = url_decode($ref);
+ $img = url_decode($img);
+
+ $img = "http://$img" unless ($img =~ m/^https?:/i);
LOG ($verbose_filter, " candidate: $ref");
push @candidates, $img;
my $i = int(rand($#photomakers + 1));
my $fn = $photomakers[$i];
my $file = &$fn;
- my $words .= $file . "%20filetype:jpg";
+ #$file .= "%20filetype:jpg";
- pick_from_google_images ($timeout, $words);
+ pick_from_google_images ($timeout, $file);
}
+\f
+############################################################################
+#
+# Pick images by feeding random words into Bing Image Search.
+# By the way: fuck Microsoft.
+#
+############################################################################
+
+my $bing_images_url = "http://www.bing.com/images/async" .
+ "?CW=0" .
+ "&CH=0" .
+ "&q=";
+
+
+# bingimgs
+# Do a Bing image search for $words (or a single random dictionary word
+# when $words is undef) and return a random (referring-page, image-URL)
+# pair, or () when nothing usable was found.
+sub pick_from_bing_images($;$$) {
+  my ($timeout, $words, $max_page) = @_;   # NOTE: $max_page is accepted
+                                           # but currently unused.
+
+  if (!defined($words)) {
+    $words = random_word();   # only one word for Bing
+  }
+
+  # Start at a random offset into the result list, for variety.
+  my $off = int(rand(300));
+  my $search_url = $bing_images_url . $words . "&first=" . $off;
+
+  my ($search_hit_count, @subpages) =
+    pick_from_search_engine ($timeout, $search_url, $words);
+
+  # For Bing results, pick_from_search_engine packs each hit as
+  # "imageURL\treferringPageURL" (see its JSON-unpacking branch).
+  my @candidates = ();
+  my %referers;
+  foreach my $u (@subpages) {
+    my ($img, $ref) = ($u =~ m/^(.*?)\t(.*)$/s);
+    next unless $img;
+    LOG ($verbose_filter, "  candidate: $ref");
+    push @candidates, $img;
+    $referers{$img} = $ref;
+  }
+
+  # Drop images hosted on known search-engine-spamming domains, then
+  # pick one of the survivors at random.
+  @candidates = depoison (@candidates);
+  return () if ($#candidates < 0);
+  my $i = int(rand($#candidates+1));
+  my $img = $candidates[$i];
+  my $ref = $referers{$img};
+
+  LOG ($verbose_load, "picked image " . ($i+1) . ": $img (on $ref)");
+  return ($ref, $img);
+}
+
+
+
+\f
+############################################################################
+#
+# Pick images by feeding random numbers into Bing Image Search.
+#
+############################################################################
+
+# bingnums
+# Search Bing Images for a random number in 0..9998; about 30% of the
+# time the number is zero-padded to four digits (e.g. "0042"),
+# presumably to match zero-padded file names -- same trick as the
+# "googlenums" method.  Returns pick_from_bing_images()'s result.
+sub pick_from_bing_image_numbers($) {
+  my ($timeout) = @_;
+
+  my $max = 9999;
+  my $number = int(rand($max));
+
+  $number = sprintf("%04d", $number)
+    if (rand() < 0.3);
+
+  pick_from_bing_images ($timeout, "$number");
+}
+
+\f
+############################################################################
+#
+# Pick images by feeding generated photo file names into Bing Image Search.
+#
+############################################################################
+
+# bingphotos
+# Pick one of the @photomakers generator subs at random, call it to
+# produce a file name, and feed that name to Bing image search.
+# (Mirrors the "googlephotos" method; the generated names presumably
+# mimic digital-camera default file names -- confirm against the
+# @photomakers definitions, which are outside this hunk.)
+sub pick_from_bing_image_photos($) {
+  my ($timeout) = @_;
+
+  my $i = int(rand($#photomakers + 1));
+  my $fn = $photomakers[$i];
+  my $file = &$fn;
+
+  pick_from_bing_images ($timeout, $file);
+}
\f
############################################################################
sub pick_from_alta_vista_text($) {
my ($timeout) = @_;
- my $words = random_words(0);
+ my $words = random_words('%20');
my $page = (int(rand(9)) + 1);
my $search_url = $alta_vista_url . $words;
return ($search_url, $img);
}
+\f
+############################################################################
+#
+# Pick images from Twitpic's list of recently-posted images.
+#
+############################################################################
+
+my $twitpic_img_url = "http://twitpic.com/public_timeline/feed.rss";
+
+# With most of our image sources, we get a random page and then select
+# from the images on it. However, in the case of Twitpic, the page
+# of images tends to update slowly; so we'll remember the last N entries
+# on it and randomly select from those, to get a wider variety each time.
+
+my $twitpic_cache_size = 1000;
+my @twitpic_cache = (); # fifo, for ordering by age
+my %twitpic_cache = (); # hash, for detecting dups
+
+# twitpic
+# Fetch Twitpic's public-timeline RSS, fold its entries into a FIFO
+# cache of page URLs (so successive calls see more variety than the
+# slowly-updating feed provides), then pick a cached page at random,
+# fetch it, and scrape the first plausible <IMG SRC> off it.
+# Returns (page-URL, image-URL), or () when nothing was found.
+sub pick_from_twitpic_images($) {
+  my ($timeout) = @_;
+
+  $last_search = $twitpic_img_url; # for warnings
+
+  my ( $base, $body ) = get_document ($twitpic_img_url, undef, $timeout);
+
+  # Update the cache.
+
+  if ($body) {
+    # Flatten the feed to one line, then re-split so each <item> is
+    # its own line.
+    $body =~ s/\n/ /gs;
+    $body =~ s/(<item)\b/\n$1/gsi;
+
+    my @items = split (/\n/, $body);
+    shift @items;    # text before the first <item> is feed preamble
+    foreach (@items) {
+      next unless (m@<link>([^<>]*)</link>@si);
+      my $page = html_unquote ($1);
+
+      # Point at the full-size variant of the photo page.
+      $page =~ s@/$@@s;
+      $page .= '/full';
+
+      next if ($twitpic_cache{$page}); # already have it
+
+      LOG ($verbose_filter, "  candidate: $page");
+      push @twitpic_cache, $page;
+      $twitpic_cache{$page} = $page;
+    }
+  }
+
+  # Pull from the cache.
+
+  return () if ($#twitpic_cache == -1);
+
+  my $n = $#twitpic_cache+1;
+  my $i = int(rand($n));
+  my $page = $twitpic_cache[$i];
+
+  # delete this one from @twitpic_cache and from %twitpic_cache.
+  #
+  @twitpic_cache = ( @twitpic_cache[0 .. $i-1],
+                     @twitpic_cache[$i+1 .. $#twitpic_cache] );
+  delete $twitpic_cache{$page};
+
+  # Keep the size of the cache under the limit by nuking older entries
+  #
+  while ($#twitpic_cache >= $twitpic_cache_size) {
+    my $page = shift @twitpic_cache;
+    delete $twitpic_cache{$page};
+  }
+
+  # Fetch the chosen page and take the first <img> that is not part of
+  # the site chrome (skips /js/ and /images/ paths).
+  ( $base, $body ) = get_document ($page, undef, $timeout);
+  my $img = undef;
+  $body = '' unless defined($body);
+
+  foreach (split (/<img\s+/, $body)) {
+    my ($src) = m/\bsrc=[\"\'](.*?)[\"\']/si;
+    next unless $src;
+    next if m@/js/@s;
+    next if m@/images/@s;
+
+    $img = $src;
+
+    $img = "http:$img" if ($img =~ m@^//@s); # Oh come on
+
+    # Sometimes these images are hosted on twitpic, sometimes on Amazon.
+    # A root-relative SRC gets the page's scheme://host prepended.
+    if ($img =~ m@^/@) {
+      $base =~ s@^(https?://[^/]+)/.*@$1@s;
+      $img = $base . $img;
+    }
+    last;
+  }
+
+  if (!$img) {
+    LOG ($verbose_load, "no matching images on $page\n");
+    return ();
+  }
+
+  LOG ($verbose_load, "picked image " .($i+1) . "/$n: $img");
+
+  return ($page, $img);
+}
+
+\f
+############################################################################
+#
+# Pick images from Twitter's list of recently-posted updates.
+#
+############################################################################
+
+# With most of our image sources, we get a random page and then select
+# from the images on it. However, in the case of Twitter, the page
+# of images only updates once a minute; so we'll remember the last N entries
+# on it and randomly select from those, to get a wider variety each time.
+
+my $twitter_img_url = "http://api.twitter.com/1/statuses/" .
+ "public_timeline.json" .
+ "?include_entities=true" .
+ "&include_rts=true" .
+ "&count=200";
+
+my $twitter_cache_size = 1000;
+
+my @twitter_cache = (); # fifo, for ordering by age
+my %twitter_cache = (); # hash, for detecting dups
+
+
+# twitter
+# Fetch Twitter's public-timeline JSON, fold its media entries into a
+# FIFO cache keyed by page URL (the feed only updates about once a
+# minute, so the cache widens the selection), then pick a cached entry
+# at random.  Returns (page-URL, media-image-URL), or () when the
+# cache is empty.
+sub pick_from_twitter_images($) {
+  my ($timeout) = @_;
+
+  $last_search = $twitter_img_url; # for warnings
+
+  my ( $base, $body ) = get_document ($twitter_img_url, undef, $timeout);
+  # Update the cache.
+
+  if ($body) {
+    $body =~ s/[\r\n]+/ /gs;
+
+    # Parsing JSON is a pain in the ass.  So we halfass it as usual.
+    # Strip the outer [ ], collapse "}," inside nested arrays so the
+    # split below only breaks between top-level tweet objects.
+    $body =~ s/^\[|\]$//s;
+    $body =~ s/(\[.*?\])/{ $_ = $1; s@\},@\} @gs; $_; }/gsexi;
+    my @items = split (/},{/, $body);
+    foreach (@items) {
+      my ($name) = m@"screen_name":"([^\"]+)"@si;
+      my ($img)  = m@"media_url":"([^\"]+)"@si;
+      my ($page) = m@"display_url":"([^\"]+)"@si;
+      next unless ($name && $img && $page);
+      # Strip JSON backslash escapes and make both URLs absolute.
+      foreach ($img, $page) {
+        s/\\//gs;
+        $_ = "http://$_" unless (m/^http/si);
+      }
+
+      next if ($twitter_cache{$page}); # already have it
+
+      LOG ($verbose_filter, "  candidate: $page - $img");
+      push @twitter_cache, $page;
+      $twitter_cache{$page} = $img;    # page URL -> media image URL
+    }
+  }
+
+  # Pull from the cache.
+
+  return () if ($#twitter_cache == -1);
+
+  my $n = $#twitter_cache+1;
+  my $i = int(rand($n));
+  my $page = $twitter_cache[$i];
+  my $url = $twitter_cache{$page};     # the media image URL
+
+  # delete this one from @twitter_cache and from %twitter_cache.
+  #
+  @twitter_cache = ( @twitter_cache[0 .. $i-1],
+                     @twitter_cache[$i+1 .. $#twitter_cache] );
+  delete $twitter_cache{$page};
+
+  # Keep the size of the cache under the limit by nuking older entries
+  #
+  while ($#twitter_cache >= $twitter_cache_size) {
+    my $page = shift @twitter_cache;
+    delete $twitter_cache{$page};
+  }
+
+  LOG ($verbose_load, "picked page $url");
+
+  $suppress_audit = 1;
+
+  return ($page, $url);
+}
+
\f
############################################################################
#
#
############################################################################
-my $flickr_img_url = "http://www.flickr.com/photos/";
+my $flickr_img_url = "http://www.flickr.com/explore/";
# Like LiveJournal, the Flickr page of images tends to update slowly,
# so remember the last N entries on it and randomly select from those.
my $count = 0;
my $count2 = 0;
foreach (split (/\n/, $body)) {
+
my ($page, $thumb) = m@<A \s [^<>]* \b HREF=\"([^<>\"]+)\" [^<>]* > \s*
- <IMG \s [^<>]* \b SRC=\"([^<>\"]+)\" @xsi;
+ <IMG \s [^<>]* \b
+ data-defer-src = \"([^<>\"]+)\" @xsi;
next unless defined ($thumb);
$page = html_unquote ($page);
$thumb = html_unquote ($thumb);
- next unless ($thumb =~ m@^http://farm\d*\.static\.flickr\.com/@);
+ next unless ($thumb =~ m@^http://farm\d*\.static\.?flickr\.com/@);
my $base = "http://www.flickr.com/";
$page =~ s@^/@$base@;
#
############################################################################
-my $flickr_rss_base = ("http://www.flickr.com/services/feeds/photos_public.gne" .
- "?format=rss_200_enc&tags=");
+my $flickr_rss_base = ("http://www.flickr.com/services/feeds/photos_public.gne".
+ "?format=rss_200_enc&tagmode=any&tags=");
# Picks a random RSS feed; picks a random image from that feed;
# returns 2 URLs: the page containing the image, and the image.
sub pick_from_flickr_random($) {
my $timeout = shift;
- my $rss = $flickr_rss_base . random_word();
+ my $words = random_words(',');
+ my $rss = $flickr_rss_base . $words;
$last_search = $rss;
+ $_ = $words;
+ s/,/ /g;
+
print STDERR "\n\n" if ($verbose_load);
+ LOG ($verbose_load, "words: $_");
LOG ($verbose_load, "URL: $last_search");
$suppress_audit = 1;
return ($base, $img);
}
+\f
+############################################################################
+#
+# Pick random images from Instagram, via gramfeed.com's key.
+#
+############################################################################
+
+my $instagram_url_base = "https://api.instagram.com/v1/media/popular" .
+  "?client_id=b59fbe4563944b6c88cced13495c0f49";
+
+# instagram_random
+#
+# Picks a random image from Instagram's "popular" feed (via gramfeed.com's
+# API key).  Returns ($page_url, $image_url) on success, or an empty list
+# if the feed could not be loaded or contained nothing usable.
+#
+sub pick_from_instagram($) {
+  my $timeout = shift;
+
+  $last_search = $instagram_url_base;
+
+  print STDERR "\n\n" if ($verbose_load);
+  LOG ($verbose_load, "URL: $last_search");
+
+  my ( $base, $body ) = get_document ($last_search, undef, $timeout);
+  if (!$base || !$body) {
+    $body = undef;
+    return;
+  }
+
+  # Crude JSON parsing without a JSON module: split the body at each
+  # "link" key, so each chunk holds roughly one entry's worth of data.
+  $body =~ s/("link")/\001$1/gs;
+  my @chunks = split(/\001/, $body);
+  shift @chunks;   # text before the first "link" contains no entry
+  my @urls = ();
+  foreach (@chunks) {
+    s/\\//gs;      # strip JSON backslash-escaping from the URLs
+    my ($url) = m/"link":\s*"(.*?)"/s;
+    my ($img) = m/"standard_resolution":{"url":\s*"(.*?)"/s;
+    # Fall back to any "url" key if there was no "standard_resolution"
+    # image.  (Guarding on $url here was a bug: that made this fallback
+    # dead code, since entries without $url are discarded just below.)
+    ($img) = m/"url":\s*"(.*?)"/s unless $img;
+    next unless ($url && $img);
+    push @urls, [ $url, $img ];
+  }
+
+  if ($#urls < 0) {
+    LOG ($verbose_load, "no images on $last_search");
+    return ();
+  }
+
+  my $i = int(rand($#urls+1));
+  my ($url, $img) = @{$urls[$i]};
+
+  LOG ($verbose_load, "picked image " .($i+1) . "/" . ($#urls+1) . ": $url");
+  return ($url, $img);
+}
+
\f
############################################################################
#
$last_search = $id;
while ($now = time, $now < $start + $timeout) {
- local *DIR;
- opendir (DIR, $dir) || error ("$dir: $!");
- while (my $file = readdir(DIR)) {
+ opendir (my $dir, $dir) || error ("$dir: $!");
+ while (my $file = readdir($dir)) {
next if ($file =~ m/^\./);
$file = "$dir/$file";
- closedir DIR;
+ closedir ($dir);
LOG ($verbose_load, "picked file $file ($id)");
return ($id, $file);
}
- closedir DIR;
+ closedir ($dir);
}
LOG (($verbose_net || $verbose_load), "timed out for $id");
return ();
error ("\$driftnet_dir unset?") unless ($driftnet_dir);
my $id = $driftnet_magic;
- my $re = qr/$driftnet_dir/;
error ("$id: $file not in $driftnet_dir?")
- unless ($file =~ m@^$re@o);
+ unless ($file =~ m@^\Q$driftnet_dir@o);
- local *IN;
- open (IN, $file) || error ("$id: $file: $!");
+ open (my $in, '<', $file) || error ("$id: $file: $!");
my $body = '';
- while (<IN>) { $body .= $_; }
- close IN || error ("$id: $file: $!");
+ local $/ = undef; # read entire file
+ $body = <$in>;
+ close ($in) || error ("$id: $file: $!");
unlink ($file) || error ("$id: $file: rm: $!");
return ($id, $body);
}
}
# local-directory
-sub pick_from_local_dir {
- my ( $timeout ) = @_;
+sub pick_from_local_dir($) {
+ my ($timeout) = @_;
my $id = $local_magic;
$last_search = $id;
my $v = ($verbose_exec ? "-v" : "");
my $pick = `xscreensaver-getimage-file $v "$dir"`;
+ $pick =~ s/\s+$//s;
+ $pick = "$dir/$pick" unless ($pick =~ m@^/@s); # relative path
LOG ($verbose_load, "picked file $pick ($id)");
return ($id, $pick);
}
-sub get_local_file {
+sub get_local_file($) {
my ($file) = @_;
error ("\$local_dir unset?") unless ($local_dir);
my $id = $local_magic;
- my $re = qr/$local_dir/;
error ("$id: $file not in $local_dir?")
- unless ($file =~ m@^$re@o);
+ unless ($file =~ m@^\Q$local_dir@o);
- local *IN;
- open (IN, $file) || error ("$id: $file: $!");
- my $body = '';
- while (<IN>) { $body .= $_; }
- close IN || error ("$id: $file: $!");
+ open (my $in, '<', $file) || error ("$id: $file: $!");
+ local $/ = undef; # read entire file
+ my $body = <$in>;
+ close ($in) || error ("$id: $file: $!");
return ($id, $body);
}
#
##############################################################################
-my $image_ppm = sprintf ("%s/webcollage-%08x",
+my $image_ppm = sprintf ("%s/webcollage-%08x.ppm",
($ENV{TMPDIR} ? $ENV{TMPDIR} : "/tmp"),
rand(0xFFFFFFFF));
-my $image_tmp1 = sprintf ("%s/webcollage-1-%08x",
+my $image_tmp1 = sprintf ("%s/webcollage-1-%08x.ppm",
($ENV{TMPDIR} ? $ENV{TMPDIR} : "/tmp"),
rand(0xFFFFFFFF));
-my $image_tmp2 = sprintf ("%s/webcollage-2-%08x",
+my $image_tmp2 = sprintf ("%s/webcollage-2-%08x.ppm",
($ENV{TMPDIR} ? $ENV{TMPDIR} : "/tmp"),
rand(0xFFFFFFFF));
$body = undef;
};
- if (($pid = open(PIPE, "| $cmd2 > $output"))) {
+ if (($pid = open (my $pipe, "| $cmd2 > $output"))) {
$timed_out = 0;
alarm $cvt_timeout;
- print PIPE $body;
+ print $pipe $body;
$body = undef;
- close PIPE;
+ close $pipe;
LOG ($verbose_exec, "awaiting $pid");
waitpid ($pid, 0);
my $pixel = pack('CCC', $r, $g, $b);
my $bits = "P6\n$w $h\n255\n" . ($pixel x ($w * $h));
- local *OUT;
- open (OUT, ">$outfile") || error ("$outfile: $!");
- print OUT $bits;
- close OUT;
+ open (my $out, '>', $outfile) || error ("$outfile: $!");
+ print $out $bits;
+ close $out;
}
my ($iw, $ih);
my $body = "";
- local *IMG;
- open(IMG, "<$bgimage") || error "couldn't open $bgimage: $!";
- my $cmd;
- while (<IMG>) { $body .= $_; }
- close (IMG);
+ open (my $imgf, '<', $bgimage) || error "couldn't open $bgimage: $!";
+ local $/ = undef; # read entire file
+ $body = <$imgf>;
+ close ($imgf);
+ my $cmd;
if ((@_ = gif_size ($body))) {
($iw, $ih) = @_;
$cmd = "giftopnm |";
"pasting $bgimage (${iw}x$ih) into base image at $x,$y");
$cmd .= "pnmpaste - $x $y $image_ppm > $image_tmp1";
- open (IMG, "| $cmd") || error "running $cmd: $!";
- print IMG $body;
+ open ($imgf, "| $cmd") || error "running $cmd: $!";
+ print $imgf $body;
$body = undef;
- close (IMG);
+ close ($imgf);
LOG ($verbose_exec, "subproc exited normally.");
rename ($image_tmp1, $image_ppm) ||
error "renaming $image_tmp1 to $image_ppm: $!";
return 0;
}
- local *OUT;
- open (OUT, ">$image_tmp1") || error ("writing $image_tmp1: $!");
- print OUT $body || error ("writing $image_tmp1: $!");
- close OUT || error ("writing $image_tmp1: $!");
+ open (my $out, '>', $image_tmp1) || error ("writing $image_tmp1: $!");
+ (print $out $body) || error ("writing $image_tmp1: $!");
+ close ($out) || error ("writing $image_tmp1: $!");
} else {
($iw, $ih) = image_to_pnm ($img, $body, $image_tmp1);
rename ($image_tmp2, $image_tmp1);
# re-get the width/height in case the filter resized it.
- local *IMG;
- open(IMG, "<$image_tmp1") || return 0;
- $_ = <IMG>;
- $_ = <IMG>;
+ open (my $imgf, '<', $image_tmp1) || return 0;
+ $_ = <$imgf>;
+ $_ = <$imgf>;
($iw, $ih) = m/^(\d+) (\d+)$/;
- close (IMG);
+ close ($imgf);
return 0 unless ($iw && $ih);
}
# the next network retrieval, which is probably a better thing
# to do anyway.
#
- $cmd .= " &";
+ $cmd .= " &" unless ($cocoa_p);
$rc = nontrapping_system ($cmd);
my $imagemap_html = $imagemap_base . ".html";
my $imagemap_jpg = $imagemap_base . ".jpg";
+ my $imagemap_jpg2 = $imagemap_jpg;
+ $imagemap_jpg2 =~ s@^.*/@@gs;
if (!defined ($imagemap_html_tmp)) {
$imagemap_html_tmp = $imagemap_html . sprintf (".%08x", rand(0xffffffff));
#
my $template_html = '';
{
- local *IN;
- if (open (IN, "<$imagemap_html")) {
- while (<IN>) { $template_html .= $_; }
- close IN;
+ if (open (my $in, '<', $imagemap_html)) {
+ local $/ = undef; # read entire file
+ $template_html = <$in>;
+ close $in;
LOG ($verbose_pbm, "read template $imagemap_html");
}
if ($template_html =~ m/^\s*$/s) {
$template_html = ("<MAP NAME=\"$map_name\"></MAP>\n" .
- "<IMG SRC=\"$imagemap_base.jpg\"" .
+ "<IMG SRC=\"$imagemap_jpg2\"" .
" USEMAP=\"$map_name\">\n");
LOG ($verbose_pbm, "created dummy template");
}
my $body = $template_html;
my $areas = join ("\n\t", @imagemap_areas);
my $map = ("<MAP NAME=\"$map_name\">\n\t$areas\n</MAP>");
- my $img = ("<IMG SRC=\"$imagemap_base.jpg\" " .
+ my $img = ("<IMG SRC=\"$imagemap_jpg2\" " .
"BORDER=0 " .
"WIDTH=$image_width HEIGHT=$image_height " .
"USEMAP=\"#$map_name\">");
$body =~ s@(<SPAN\s+CLASS=\"webcollage_size\">).*?(</SPAN>)@$1$size$2@si;
}
- local *OUT;
- open (OUT, ">$imagemap_html_tmp") || error ("$imagemap_html_tmp: $!");
- print OUT $body || error ("$imagemap_html_tmp: $!");
- close OUT || error ("$imagemap_html_tmp: $!");
+ open (my $out, '>', $imagemap_html_tmp) || error ("$imagemap_html_tmp: $!");
+ (print $out $body) || error ("$imagemap_html_tmp: $!");
+ close ($out) || error ("$imagemap_html_tmp: $!");
LOG ($verbose_pbm, "wrote $imagemap_html_tmp");
}
}
+# Figure out what the proxy server should be, either from environment
+# variables or by parsing the output of the (MacOS) program "scutil",
+# which tells us what the system-wide proxy settings are.
+#
+# Sets the global $http_proxy to "host" or "host:port"; leaves it unset
+# when no proxy is configured anywhere.
+#
+sub set_proxy() {
+
+  if (! $http_proxy) {
+    # historical suckage: the environment variable name is lower case.
+    $http_proxy = $ENV{http_proxy} || $ENV{HTTP_PROXY};
+  }
+
+  if (defined ($http_proxy)) {
+    if ($http_proxy && $http_proxy =~ m@^http://([^/]*)/?$@ ) {
+      # historical suckage: allow "http://host:port" as well as "host:port".
+      $http_proxy = $1;
+    }
+
+  } else {
+    # No proxy environment variable at all (note: a *defined but empty*
+    # one lands in the branch above instead, deliberately suppressing
+    # this probe).  Ask the OS; on non-MacOS systems scutil does not
+    # exist and the backticks come up empty.
+    my $proxy_data = `scutil --proxy 2>/dev/null`;
+    my ($server) = ($proxy_data =~ m/\bHTTPProxy\s*:\s*([^\s]+)/s);
+    my ($port) = ($proxy_data =~ m/\bHTTPPort\s*:\s*([^\s]+)/s);
+    # Note: this ignores the "ExceptionsList".
+    if ($server) {
+      $http_proxy = $server;
+      $http_proxy .= ":$port" if $port;
+    }
+  }
+
+  if ($http_proxy) {
+    LOG ($verbose_net, "proxy server: $http_proxy");
+  }
+}
+
+
sub init_signals() {
$SIG{HUP} = \&signal_cleanup;
my $root_p = 0;
my $window_id = undef;
- # historical suckage: the environment variable name is lower case.
- $http_proxy = $ENV{http_proxy} || $ENV{HTTP_PROXY};
-
while ($_ = $ARGV[0]) {
shift @ARGV;
if ($_ eq "-display" ||
} else {
error ("local directory path must be set")
}
+ } elsif ($_ eq "-fps") {
+ # -fps only works on MacOS, via "webcollage-cocoa.m".
+ # Ignore it if passed to this script in an X11 context.
} elsif ($_ eq "-debug" || $_ eq "--debug") {
my $which = shift @ARGV;
my @rest = @search_methods;
"[-root] [-display dpy] [-verbose] [-debug which]\n" .
"\t\t [-timeout secs] [-delay secs] [-size WxH]\n" .
"\t\t [-no-output] [-urls-only] [-imagemap filename]\n" .
- "\t\t [-filter cmd] [-filter2 cmd] [-background color]\n" .
+ "\t\t [-background color] [-opacity f]\n" .
+ "\t\t [-filter cmd] [-filter2 cmd]\n" .
"\t\t [-dictionary dictionary-file] [-http-proxy host[:port]]\n" .
"\t\t [-driftnet [driftnet-program-and-args]]\n" .
"\t\t [-directory local-image-directory]\n" .
}
}
- if ($http_proxy && $http_proxy eq "") {
- $http_proxy = undef;
- }
- if ($http_proxy && $http_proxy =~ m@^http://([^/]*)/?$@ ) {
- # historical suckage: allow "http://host:port" as well as "host:port".
- $http_proxy = $1;
- }
-
if (!$root_p && !$no_output_p && !$cocoa_p) {
print STDERR $copyright;
error "the -root argument is mandatory (for now.)";
}
init_signals();
+ set_proxy();
spawn_driftnet ($driftnet_cmd) if ($driftnet_cmd);