#!/usr/bin/perl -w
-# Copyright © 2001-2011 Jamie Zawinski <jwz@jwz.org>.
+# Copyright © 2001-2013 Jamie Zawinski <jwz@jwz.org>.
#
# Permission to use, copy, modify, distribute, and sell this software and its
# documentation for any purpose is hereby granted without fee, provided that
# the above copyright notice appear in all copies and that both that
# copyright notice and this permission notice appear in supporting
# documentation.  No representations are made about the suitability of this
# software for any purpose.  It is provided "as is" without express or
# implied warranty.
#
# This program chooses a random file from under the given directory, and
# prints its name.  The file will be an image file whose dimensions are
# larger than a certain minimum size.
#
+# If the directory is a URL, it is assumed to be an RSS or Atom feed.
+# The images from that feed will be downloaded, cached, and selected from
+# at random. The feed will be re-polled periodically, as needed.
+#
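+# E.g. (illustrative):
+#   xscreensaver-getimage-file --verbose https://www.example.com/photos.rss
+#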
# The various xscreensaver hacks that manipulate images ("jigsaw", etc.) get
# the image to manipulate by running the "xscreensaver-getimage" program.
#
# Perl 5.8.0 causes us to start getting incomprehensible
# errors about UTF-8 all over the place without this.
use bytes;
+use Digest::MD5 qw(md5_base64);
+
+# Some Linux systems don't install LWP by default!
+# Only error out if we're actually loading a URL instead of local data.
+BEGIN { eval 'use LWP::Simple;' }
+
+
my $progname = $0; $progname =~ s@.*/@@g;
-my $version = q{ $Revision: 1.29 $ }; $version =~ s/^[^0-9]+([0-9.]+).*$/$1/;
+my $version = q{ $Revision: 1.35 $ }; $version =~ s/^[^0-9]+([0-9.]+).*$/$1/;
my $verbose = 0;
# Regenerate the cache if it is older than this many seconds.
#
my $cache_max_age = 60 * 60 * 3; # 3 hours
+# Re-poll RSS/Atom feeds when local copy is older than this many seconds.
+#
+my $feed_max_age = $cache_max_age;
+
# This matches files that we are allowed to use as images (case-insensitive.)
# Anything not matching this is ignored.  This is so you can point your
# imageDirectory at directory trees that contain things other than images,
# and the non-image files will be skipped.
}
+sub html_unquote($) {
+ my ($h) = @_;
+
+ # This only needs to handle entities that occur in RSS, not full HTML.
+ my %ent = ( 'amp' => '&', 'lt' => '<', 'gt' => '>',
+ 'quot' => '"', 'apos' => "'" );
+ $h =~ s/(&(\#)?([[:alpha:]\d]+);?)/
+ {
+ my ($o, $c) = ($1, $3);
+ if (! defined($2)) {
+ $c = $ent{$c}; # for "&lt;"
+ } else {
+ if ($c =~ m@^x([\dA-F]+)$@si) { # for "&#x41;"
+ $c = chr(hex($1));
+ } elsif ($c =~ m@^\d+$@si) { # for "&#65;"
+ $c = chr($c);
+ } else {
+ $c = undef;
+ }
+ }
+ ($c || $o);
+ }
+ /gexi;
+ return $h;
+}
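+
+# A few illustrative cases of the above:
+#
+#   html_unquote('5 &lt; 6 &amp;&amp; 6 &gt; 5')  =>  '5 < 6 && 6 > 5'
+#   html_unquote('&#65;&#x42;')                   =>  'AB'
+#   html_unquote('&bogus;')                       =>  '&bogus;'  (unknown entities pass through)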
+
+
+
+sub set_proxy($) {
+ my ($ua) = @_;
+
+ if (!defined($ENV{http_proxy}) && !defined($ENV{HTTP_PROXY})) {
+ my $proxy_data = `scutil --proxy 2>/dev/null`;
+ my ($server) = ($proxy_data =~ m/\bHTTPProxy\s*:\s*([^\s]+)/s);
+ my ($port) = ($proxy_data =~ m/\bHTTPPort\s*:\s*([^\s]+)/s);
+ if ($server) {
+ # Note: this ignores the "ExceptionsList".
+ $ENV{http_proxy} = "http://" . $server . ($port ? ":$port" : "") . "/";
+ print STDERR "$progname: MacOS proxy: $ENV{http_proxy}\n"
+ if ($verbose > 2);
+ }
+ }
+
+ $ua->env_proxy();
+}
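+
+# For reference, "scutil --proxy" prints a dictionary along these lines
+# (abridged; hypothetical values):
+#
+#   <dictionary> {
+#     HTTPEnable : 1
+#     HTTPPort : 8080
+#     HTTPProxy : proxy.example.com
+#   }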
+
+
+sub init_lwp() {
+ if (! defined ($LWP::Simple::ua)) {
+ error ("\n\n\tPerl is broken. Do this to repair it:\n" .
+ "\n\tsudo cpan LWP::Simple\n");
+ }
+ set_proxy ($LWP::Simple::ua);
+}
+
+
+# Returns a list of the image enclosures in the RSS or Atom feed.
+# Elements of the list are references, [ "url", "guid" ].
+#
+sub parse_feed($);
+sub parse_feed($) {
+ my ($url) = @_;
+
+ init_lwp();
+ $LWP::Simple::ua->agent ("$progname/$version");
+ $LWP::Simple::ua->timeout (10); # bail sooner than the default of 3 minutes
+
+ my $body = (LWP::Simple::get($url) || '');
+
+ if ($body !~ m@^<\?xml\s@si) {
+ # Not an RSS/Atom feed. Try RSS autodiscovery.
+
+ error ("not an RSS or Atom feed, or HTML: $url")
+ unless ($body =~ m@<(HEAD|BODY|A|IMG)\b@si);
+
+ # Find the first <link> with RSS or Atom in it, and use that instead.
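+ # Such a link typically looks like (illustrative):
+ # <link rel="alternate" type="application/rss+xml" href="/feed.rss">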
+
+ $body =~ s@<LINK\s+([^<>]*)>@{
+ my $p = $1;
+ if ($p =~ m! \b REL \s* = \s* ['"]? alternate \b!six &&
+ $p =~ m! \b TYPE \s* = \s* ['"]? application/(atom|rss) !six &&
+ $p =~ m! \b HREF \s* = \s* ['"] ( [^<>'"]+ ) !six
+ ) {
+ my $u2 = html_unquote ($1);
+ print STDERR "$progname: found feed: $u2\n"
+ if ($verbose);
+ return parse_feed ($u2);
+ }
+ '';
+ }@gsexi;
+
+ error ("no RSS or Atom feed for HTML page: $url");
+ }
+
+
+ $body =~ s@(<ENTRY|<ITEM)@\001$1@gsi;
+ my @items = split(/\001/, $body);
+ shift @items;
+
+ my @imgs = ();
+ my %ids;
+
+ foreach my $item (@items) {
+ my $iurl = undef;
+ my $id = undef;
+
+ # First look for <link rel="enclosure" href="...">
+ #
+ if (! $iurl) {
+ $item =~ s!(<LINK[^<>]*>)!{
+ my $link = $1;
+ my ($rel) = ($link =~ m/\bREL\s*=\s*[\"\']?([^<>\'\"]+)/si);
+ my ($type) = ($link =~ m/\bTYPE\s*=\s*[\"\']?([^<>\'\"]+)/si);
+ my ($href) = ($link =~ m/\bHREF\s*=\s*[\"\']([^<>\'\"]+)/si);
+
+ if ($rel && lc($rel) eq 'enclosure') {
+ if ($type) {
+ $href = undef unless ($type =~ m@^image/@si); # omit videos
+ }
+ $iurl = html_unquote($href) if $href;
+ }
+ $link;
+ }!gsexi;
+ }
+
+ # Then look for <media:content url="...">
+ #
+ if (! $iurl) {
+ $item =~ s!(<MEDIA:CONTENT[^<>]*>)!{
+ my $link = $1;
+ my ($href) = ($link =~ m/\bURL\s*=\s*[\"\']([^<>\'\"]+)/si);
+ $iurl = html_unquote($href) if $href;
+ $link;
+ }!gsexi;
+ }
+
+ # Then look for <enclosure url="..."/>
+ #
+ if (! $iurl) {
+ $item =~ s!(<ENCLOSURE[^<>]*>)!{
+ my $link = $1;
+ my ($type) = ($link =~ m/\bTYPE\s*=\s*[\"\']?([^<>\'\"]+)/si);
+ my ($href) = ($link =~ m/\bURL\s*=\s*[\"\']([^<>\'\"]+)/si);
+ $iurl = html_unquote($href)
+ if ($href && $type && $type =~ m@^image/@si); # omit videos
+ $link;
+ }!gsexi;
+ }
+
+ # Ok, maybe there's an image in the <url> field?
+ #
+ if (! $iurl) {
+ $item =~ s!((<URL\b[^<>]*>)([^<>]*))!{
+ my ($all, $u2) = ($1, $3);
+ $iurl = html_unquote($u2) if ($u2 =~ m/$good_file_re/io);
+ $all;
+ }!gsexi;
+ }
+
+ # Then look for <description>... with an <img src="..."> inside.
+ #
+ if (! $iurl) {
+ $item =~ s!(<description[^<>]*>.*?</description>)!{
+ my $desc = $1;
+ $desc = html_unquote($desc);
+ my ($href) = ($desc =~ m@<IMG[^<>]*\bSRC=[\"\']?([^\"\'<>]+)@si);
+ $iurl = $href if ($href);
+ $desc;
+ }!gsexi;
+ }
+
+ # Could also do <content:encoded>, but the above probably covers all
+ # of the real-world possibilities.
+
+
+ # Find a unique ID for this image, to defeat image farms.
+ # First look for <id>...</id>
+ ($id) = ($item =~ m!<ID\b[^<>]*>\s*([^<>]+?)\s*</ID>!si) unless $id;
+
+ # Then look for <guid isPermaLink=...> ... </guid>
+ ($id) = ($item =~ m!<GUID\b[^<>]*>\s*([^<>]+?)\s*</GUID>!si) unless $id;
+
+ # Then look for <link> ... </link>
+ ($id) = ($item =~ m!<LINK\b[^<>]*>\s*([^<>]+?)\s*</LINK>!si) unless $id;
+
+
+ if ($iurl) {
+ $id = $iurl unless $id;
+ my $o = $ids{$id};
+ if (! $o) {
+ $ids{$id} = $iurl;
+ my @P = ($iurl, $id);
+ push @imgs, \@P;
+ } elsif ($iurl ne $o) {
+ print STDERR "$progname: WARNING: dup ID \"$id\"" .
+ " for \"$o\" and \"$iurl\"\n";
+ }
+ }
+ }
+
+ return @imgs;
+}
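+
+# Illustrative use of the above (hypothetical feed URL):
+#
+#   foreach my $p (parse_feed ('https://www.example.com/photos.rss')) {
+#     my ($url, $guid) = @$p;
+#     print "$guid\t$url\n";
+#   }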
+
+
+# Like md5_base64 but uses filename-safe characters.
+#
+sub md5_file($) {
+ my ($s) = @_;
+ $s = md5_base64($s);
+ $s =~ s@[/]@_@gs;
+ $s =~ s@[+]@-@gs;
+ return $s;
+}
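+
+# E.g., "/" and "+" in the base64 digest become "_" and "-", so the
+# 22-character result is always safe to use as a file name.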
+
+
+# Given the URL of an image, download it into the given directory
+# and return the file name.
+#
+sub download_image($$$) {
+ my ($url, $uid, $dir) = @_;
+
+ my ($ext) = ($url =~ m@\.([a-z\d]+)$@si);
+ my $file = md5_file ($uid);
+ $file .= '.' . lc($ext) if $ext;
+
+ # Don't bother doing If-Modified-Since to see if the URL has changed.
+ # If we have already downloaded it, assume it's good.
+ if (-f "$dir/$file") {
+ print STDERR "$progname: exists: $dir/$file for $uid / $url\n"
+ if ($verbose > 1);
+ return $file;
+ }
+
+ # Special-case kludge for Flickr:
+ # Their RSS feeds sometimes include only the small versions of the images.
+ # So if the URL ends in "s" (75x75), "t" (100x100) or "m" (240x240), then
+ # munge it to be "b" (1024x1024).
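+ # E.g. (hypothetical): ".../1234_abcd_m.jpg" becomes ".../1234_abcd_b.jpg".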
+ #
+ $url =~ s@_[stm](\.[a-z]+)$@_b$1@si
+ if ($url =~ m@^https?://[^/?#&]*?flickr\.com/@si);
+
+ print STDERR "$progname: downloading: $dir/$file for $uid / $url\n"
+ if ($verbose > 1);
+ init_lwp();
+ $LWP::Simple::ua->agent ("$progname/$version");
+ my $status = LWP::Simple::mirror ($url, "$dir/$file");
+ if (!LWP::Simple::is_success ($status)) {
+ print STDERR "$progname: error $status: $url\n"; # keep going
+ }
+
+ return $file;
+}
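+
+# Illustrative (hypothetical arguments): an enclosure URL
+# "https://www.example.com/a.jpg" with guid $id is saved as
+# "$dir/<md5_file($id)>.jpg", and that file name (without the
+# directory part) is returned.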
+
+
+sub mirror_feed($) {
+ my ($url) = @_;
+
+ if ($url !~ m/^https?:/si) { # not a URL: local directory.
+ return (undef, $url);
+ }
+
+ my $dir = "$ENV{HOME}/Library/Caches"; # MacOS location
+ if (-d $dir) {
+ $dir = "$dir/org.jwz.xscreensaver.feeds";
+ } elsif (-d "$ENV{HOME}/tmp") {
+ $dir = "$ENV{HOME}/tmp/.xscreensaver-feeds";
+ } else {
+ $dir = "$ENV{HOME}/.xscreensaver-feeds";
+ }
+
+ if (! -d $dir) {
+ mkdir ($dir) || error ("mkdir $dir: $!");
+ print STDERR "$progname: mkdir $dir/\n" if ($verbose);
+ }
+
+ # Name the cache directory after the MD5 hash of the feed URL.
+ $dir .= '/' . md5_file ($url);
+
+ if (! -d $dir) {
+ mkdir ($dir) || error ("mkdir $dir: $!");
+ print STDERR "$progname: mkdir $dir/ for $url\n" if ($verbose);
+ }
+
+ # At this point, we have the directory corresponding to this URL.
+ # Now check to see if the files in it are up to date, and download
+ # them if not.
+
+ my $stamp = '.timestamp';
+ my $lock = "$dir/$stamp";
+
+ print STDERR "$progname: awaiting lock: $lock\n"
+ if ($verbose > 1);
+
+ my $mtime = ((stat($lock))[9]) || 0;
+
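+ # Open in append mode so any existing .timestamp content survives until
+ # we hold the exclusive lock; rewind now, truncate only at the end.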
+ my $lock_fd;
+ open ($lock_fd, '+>>', $lock) || error ("unable to write $lock: $!");
+ flock ($lock_fd, LOCK_EX) || error ("unable to lock $lock: $!");
+ seek ($lock_fd, 0, 0) || error ("unable to rewind $lock: $!");
+
+ my $poll_p = ($mtime + $feed_max_age < time);
+
+ $poll_p = 1 unless ($cache_p); # poll again now with --no-cache cmd line arg.
+
+ # Even if the cache is young, make sure it contains at least one file,
+ # and re-poll if it is empty.
+ #
+ if (! $poll_p) {
+ my $count = 0;
+ opendir (my $dirh, $dir) || error ("$dir: $!");
+ foreach my $f (readdir ($dirh)) {
+ next if ($f =~ m/^\./s);
+ $count++;
+ last;
+ }
+ closedir $dirh;
+
+ if ($count <= 0) {
+ print STDERR "$progname: no files in cache of $url\n" if ($verbose);
+ $poll_p = 1;
+ }
+ }
+
+ if ($poll_p) {
+
+ print STDERR "$progname: loading $url\n" if ($verbose);
+
+ my %files;
+ opendir (my $dirh, $dir) || error ("$dir: $!");
+ foreach my $f (readdir ($dirh)) {
+ next if ($f eq '.' || $f eq '..');
+ $files{$f} = 0; # 0 means "file exists, should be deleted"
+ }
+ closedir $dirh;
+
+ $files{$stamp} = 1;
+
+ # Download each image currently in the feed.
+ #
+ my $count = 0;
+ my @urls = parse_feed ($url);
+ print STDERR "$progname: " . ($#urls + 1) . " images\n"
+ if ($verbose > 1);
+ foreach my $p (@urls) {
+ my ($furl, $id) = @$p;
+ my $f = download_image ($furl, $id, $dir);
+ next unless $f;
+ $files{$f} = 1; # Got it, don't delete
+ $count++;
+ }
+
+ print STDERR "$progname: empty feed: $url\n" if ($count <= 0);
+
+ # Now delete any files that are no longer in the feed.
+ # But if there was nothing in the feed (network failure?)
+ # then don't blow away the old files.
+ #
+ my $kept = 0;
+ foreach my $f (keys(%files)) {
+ if ($count <= 0) {
+ $kept++;
+ } elsif ($files{$f}) {
+ $kept++;
+ } else {
+ if (unlink ("$dir/$f")) {
+ print STDERR "$progname: rm $dir/$f\n" if ($verbose > 1);
+ } else {
+ print STDERR "$progname: rm $dir/$f: $!\n"; # don't bail
+ }
+ }
+ }
+
+ # Both feed and cache are empty: nothing left but the .timestamp file.
+ error ("empty feed: $url") if ($kept <= 1);
+
+ $mtime = time(); # update the timestamp
+
+ } else {
+
+ # Not yet time to re-check the URL.
+ print STDERR "$progname: using cache: $url\n" if ($verbose);
+
+ }
+
+ # Unlock and update the write date on the .timestamp file.
+ #
+ truncate ($lock_fd, 0) || error ("unable to truncate $lock: $!");
+ seek ($lock_fd, 0, 0) || error ("unable to rewind $lock: $!");
+ utime ($mtime, $mtime, $lock_fd) || error ("unable to touch $lock: $!");
+ flock ($lock_fd, LOCK_UN) || error ("unable to unlock $lock: $!");
+ close ($lock_fd);
+ $lock_fd = undef;
+ print STDERR "$progname: unlocked $lock\n" if ($verbose > 1);
+
+ # Don't bother using the imageDirectory cache. We know that this directory
+ # is flat, and we can assume that an RSS feed doesn't contain 100,000 images
+ # like ~/Pictures/ might.
+ #
+ $cache_p = 0;
+
+ # Return the URL, and the name of the directory caching that URL's files.
+ #
+ return ($url, $dir);
+}
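+
+# Illustrative behavior of the above (hypothetical inputs):
+#
+#   mirror_feed ('https://www.example.com/photos.rss')
+#     => ('https://www.example.com/photos.rss', '<cache dir for that URL>')
+#   mirror_feed ('/home/user/Pictures')
+#     => (undef, '/home/user/Pictures')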
+
+
sub find_random_file($) {
my ($dir) = @_;
}
}
+ my $url;
+ ($url, $dir) = mirror_feed ($dir);
+
+ if ($url) {
+ $use_spotlight_p = 0;
+ print STDERR "$progname: $dir is cache for $url\n" if ($verbose > 1);
+ }
+
@all_files = read_cache ($dir);
if ($#all_files >= 0) {
my $n = int (rand ($#all_files + 1));
my $file = $all_files[$n];
if (large_enough_p ($file)) {
- $file =~ s@^\Q$dir\L/@@so || die; # remove $dir from front
+ if (! $url) {
+ $file =~ s@^\Q$dir\E/@@so || die; # remove $dir from front
+ }
return $file;
}
}
print STDERR "$progname: no suitable images in $dir " .
"(after $max_tries tries)\n";
+
+ # If we got here, blow away the cache. Maybe it's stale.
+ unlink $cache_file_name if $cache_file_name;
+
exit 1;
}
my ($w, $h) = image_file_size ($file);
if (!defined ($h)) {
+
+ # Nonexistent files are obviously too small!
+ # Already printed $verbose message about the file not existing.
+ return 0 unless -f $file;
+
print STDERR "$progname: $file: unable to determine image size\n"
if ($verbose);
# Assume that unknown files are of good sizes: this will happen if
# they matched $good_file_re, but we don't know how to parse them.
my $in;
if (! open ($in, '<', $file)) {
print STDERR "$progname: $file: $!\n" if ($verbose);
- return undef;
+ return ();
}
binmode ($in); # Larry can take Unicode and shove it up his ass sideways.
my $body = '';
print STDERR "usage: $progname [--verbose] directory\n" .
" Prints the name of a randomly-selected image file. The directory\n" .
" is searched recursively. Images smaller than " .
- "${min_image_width}x${min_image_height} are excluded.\n";
+ "${min_image_width}x${min_image_height} are excluded.\n" .
+ "\n" .
+ " The directory may also be the URL of an RSS/Atom feed. Enclosed\n" .
+ " images will be downloaded and cached locally.\n" .
+ "\n";
exit 1;
}
usage unless (defined($dir));
- $dir =~ s@^~/@$ENV{HOME}/@s; # allow literal "~/"
- $dir =~ s@/+$@@s; # omit trailing /
+ $dir =~ s@^feed:@http:@si;
- if (! -d $dir) {
- print STDERR "$progname: $dir: not a directory\n";
- usage;
+ if ($dir =~ m/^https?:/si) {
+ # ok
+ } else {
+ $dir =~ s@^~/@$ENV{HOME}/@s; # allow literal "~/"
+ $dir =~ s@/+$@@s; # omit trailing /
+
+ if (! -d $dir) {
+ print STDERR "$progname: $dir: not a directory or URL\n";
+ usage;
+ }
}
my $file = find_random_file ($dir);