#!/usr/bin/perl -w
-# Copyright © 2001-2006 Jamie Zawinski <jwz@jwz.org>.
+# Copyright © 2001-2011 Jamie Zawinski <jwz@jwz.org>.
#
# Permission to use, copy, modify, distribute, and sell this software and its
# documentation for any purpose is hereby granted without fee, provided that
# prints its name. The file will be an image file whose dimensions are
# larger than a certain minimum size.
#
+# If the directory is a URL, it is assumed to be an RSS or Atom feed.
+# The images from that feed will be downloaded, cached, and selected from
+# at random. The feed will be re-polled periodically, as needed.
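+# E.g. (the feed URL here is illustrative):
+#
+#   xscreensaver-getimage-file --verbose http://www.example.com/photos.rss
+#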
+#
# The various xscreensaver hacks that manipulate images ("jigsaw", etc.) get
# the image to manipulate by running the "xscreensaver-getimage" program.
#
# Under X11, the "xscreensaver-getimage" program invokes this script,
# depending on the value of the "chooseRandomImages" and "imageDirectory"
# settings in the ~/.xscreensaver file (or .../app-defaults/XScreenSaver).
+# The screen savers invoke "xscreensaver-getimage" via utils/grabclient.c,
+# which then invokes this script.
#
# Under Cocoa, this script lives inside the .saver bundle, and is invoked
# directly from utils/grabclient.c.
# Created: 12-Apr-01.
require 5;
-use diagnostics;
+#use diagnostics; # Fails on some MacOS 10.5 systems
use strict;
use POSIX;
use Fcntl ':flock'; # import LOCK_* constants
use POSIX ':fcntl_h'; # S_ISDIR was here in Perl 5.6
-import Fcntl ':mode' unless defined &S_ISDIR; # but it is here in Perl 5.8
+import Fcntl ':mode' unless defined &S_ISUID; # but it is here in Perl 5.8
+ # but in Perl 5.10, both of these load, and cause errors!
+ # So we have to check for S_ISUID instead of S_ISDIR? WTF?
use bytes; # Larry can take Unicode and shove it up his ass sideways.
# Perl 5.8.0 causes us to start getting incomprehensible
# errors about UTF-8 all over the place without this.
+use Digest::MD5 qw(md5_base64);
+use LWP::Simple qw($ua);
+
+
my $progname = $0; $progname =~ s@.*/@@g;
-my $version = q{ $Revision: 1.25 $ }; $version =~ s/^[^0-9]+([0-9.]+).*$/$1/;
+my $version = q{ $Revision: 1.30 $ }; $version =~ s/^[^0-9]+([0-9.]+).*$/$1/;
my $verbose = 0;
#
my $cache_max_age = 60 * 60 * 3; # 3 hours
+# Re-poll RSS/Atom feeds when local copy is older than this many seconds.
+#
+my $feed_max_age = $cache_max_age;
+
# This matches files that we are allowed to use as images (case-insensitive.)
# Anything not matching this is ignored. This is so you can point your
my $skip_count_unstat = 0; # number of files skipped without stat'ing
my $skip_count_stat = 0; # number of files skipped after stat
-sub find_all_files {
+sub find_all_files($);
+sub find_all_files($) {
my ($dir) = @_;
print STDERR "$progname: + reading dir $dir/...\n" if ($verbose > 1);
- local *DIR;
- if (! opendir (DIR, $dir)) {
+ my $dd;
+ if (! opendir ($dd, $dir)) {
print STDERR "$progname: couldn't open $dir: $!\n" if ($verbose);
return;
}
- my @files = readdir (DIR);
- closedir (DIR);
+ my @files = readdir ($dd);
+ closedir ($dd);
my @dirs = ();
}
-sub spotlight_all_files {
+sub spotlight_all_files($) {
my ($dir) = @_;
my @terms = ();
# running at once, one will wait for the other, instead of both of
# them spanking the same file system at the same time.
#
-local *CACHE_FILE;
+my $cache_fd = undef;
my $cache_file_name = undef;
my $read_cache_p = 0;
$cache_file_name = "$ENV{HOME}/.xscreensaver-getimage.cache";
}
- my $file = $cache_file_name;
- open (CACHE_FILE, "+>>$file") || error ("unable to write $file: $!");
- flock (CACHE_FILE, LOCK_EX) || error ("unable to lock $file: $!");
- seek (CACHE_FILE, 0, 0) || error ("unable to rewind $file: $!");
-
- print STDERR "$progname: reading cache $cache_file_name\n"
+ print STDERR "$progname: awaiting lock: $cache_file_name\n"
if ($verbose > 1);
- my $mtime = (stat(CACHE_FILE))[9];
+ my $file = $cache_file_name;
+ open ($cache_fd, '+>>', $file) || error ("unable to write $file: $!");
+ flock ($cache_fd, LOCK_EX) || error ("unable to lock $file: $!");
+ seek ($cache_fd, 0, 0) || error ("unable to rewind $file: $!");
+
+ my $mtime = (stat($cache_fd))[9];
if ($mtime + $cache_max_age < time) {
print STDERR "$progname: cache is too old\n" if ($verbose);
return ();
}
- my $odir = <CACHE_FILE>;
+ my $odir = <$cache_fd>;
$odir =~ s/[\r\n]+$//s if defined ($odir);
if (!defined ($odir) || ($dir ne $odir)) {
print STDERR "$progname: cache is for $odir, not $dir\n"
}
my @files = ();
- while (<CACHE_FILE>) {
+ while (<$cache_fd>) {
s/[\r\n]+$//s;
push @files, "$odir/$_";
}
if (! $read_cache_p) {
- truncate (CACHE_FILE, 0) ||
+ truncate ($cache_fd, 0) ||
error ("unable to truncate $cache_file_name: $!");
- seek (CACHE_FILE, 0, 0) ||
+ seek ($cache_fd, 0, 0) ||
error ("unable to rewind $cache_file_name: $!");
if ($#all_files >= 0) {
- print CACHE_FILE "$dir\n";
- my $re = qr/$dir/;
+ print $cache_fd "$dir\n";
foreach (@all_files) {
my $f = $_; # stupid Perl. do this to avoid modifying @all_files!
- $f =~ s@^$re/@@so || die;
- print CACHE_FILE "$f\n";
+ $f =~ s@^\Q$dir\E/@@so || die; # remove $dir from front
+ print $cache_fd "$f\n";
}
}
if ($verbose);
}
- flock (CACHE_FILE, LOCK_UN) ||
+ flock ($cache_fd, LOCK_UN) ||
error ("unable to unlock $cache_file_name: $!");
- close (CACHE_FILE);
+ close ($cache_fd);
+ $cache_fd = undef;
+}
+
+
+# Returns a list of the image enclosures in the RSS or Atom feed.
+# Elements of the list are references, [ "url", "guid" ].
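+# E.g., [ "http://example.com/a.jpg", "http://example.com/post1" ]
+# (illustrative values; the "guid" is the entry's <id>, <guid> or <link>,
+# falling back to the image URL itself.)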
+#
+sub parse_feed($) {
+ my ($url) = @_;
+
+ $ua->agent ("$progname/$version");
+ $ua->timeout (10); # bail sooner than the default of 3 minutes
+
+ my $body = (LWP::Simple::get($url) || '');
+
+ error ("not an RSS or Atom feed: $url")
+ unless ($body =~ m@^<\?xml\s@si);
+
+ $body =~ s@(<ENTRY|<ITEM)@\001$1@gsi;
+ my @items = split(/\001/, $body);
+ shift @items;
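+ # (The first chunk of the split is the feed header, before any
+ # <item>/<entry>, so discard it.)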
+
+ my @imgs = ();
+ my %ids;
+
+ foreach my $item (@items) {
+ my $iurl = undef;
+ my $id = undef;
+
+ # First look for <link rel="enclosure" href="...">
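+ # (The s!!{...}!gsexi below is just a way to visit each <link> tag;
+ # the code block returns $link, so the item text is left unchanged.)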
+ #
+ if (! $iurl) {
+ $item =~ s!(<LINK[^<>]*>)!{
+ my $link = $1;
+ my ($rel) = ($link =~ m/\bREL\s*=\s*[\"\']?([^<>\'\"]+)/si);
+ my ($type) = ($link =~ m/\bTYPE\s*=\s*[\"\']?([^<>\'\"]+)/si);
+ my ($href) = ($link =~ m/\bHREF\s*=\s*[\"\']([^<>\'\"]+)/si);
+
+ if ($rel && lc($rel) eq 'enclosure') {
+ if ($type) {
+ $href = undef unless ($type =~ m@^image/@si); # omit videos
+ }
+ $iurl = $href if ($href);
+ }
+ $link;
+ }!gsexi;
+ }
+
+ # Then look for <media:content url="...">
+ #
+ if (! $iurl) {
+ $item =~ s!(<MEDIA:CONTENT[^<>]*>)!{
+ my $link = $1;
+ my ($href) = ($link =~ m/\bURL\s*=\s*[\"\']([^<>\'\"]+)/si);
+ $iurl = $href if $href;
+ $link;
+ }!gsexi;
+ }
+
+ # Then look for <description>... with an <img src="..."> inside.
+ #
+ if (! $iurl) {
+ $item =~ s!(<description[^<>]*>.*?</description>)!{
+ my $desc = $1;
+ $desc =~ s/&lt;/</gs;
+ $desc =~ s/&gt;/>/gs;
+ $desc =~ s/&quot;/"/gs;
+ $desc =~ s/&apos;/'/gs;
+ $desc =~ s/&amp;/&/gs;
+ my ($href) = ($desc =~ m@<IMG[^<>]*\bSRC=[\"\']?([^\"\'<>]+)@si);
+ $iurl = $href if ($href);
+ $desc;
+ }!gsexi;
+ }
+
+ # Could also do <content:encoded>, but the above probably covers all
+ # of the real-world possibilities.
+
+
+ # Find a unique ID for this image, to defeat image farms.
+ # First look for <id>...</id>
+ ($id) = ($item =~ m!<ID\b[^<>]*>\s*([^<>]+?)\s*</ID>!si) unless $id;
+
+ # Then look for <guid isPermaLink=...> ... </guid>
+ ($id) = ($item =~ m!<GUID\b[^<>]*>\s*([^<>]+?)\s*</GUID>!si) unless $id;
+
+ # Then look for <link> ... </link>
+ ($id) = ($item =~ m!<LINK\b[^<>]*>\s*([^<>]+?)\s*</LINK>!si) unless $id;
+
+
+ if ($iurl) {
+ $id = $iurl unless $id;
+ my $o = $ids{$id};
+ if (! $o) {
+ $ids{$id} = $iurl;
+ my @P = ($iurl, $id);
+ push @imgs, \@P;
+ } elsif ($iurl ne $o) {
+ print STDERR "$progname: WARNING: dup ID \"$id\"" .
+ " for \"$o\" and \"$iurl\"\n";
+ }
+ }
+ }
+
+ return @imgs;
+}
+
+
+# Like md5_base64 but uses filename-safe characters.
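+# (md5_base64 output may contain "/" and "+"; mapping them to "_" and "-"
+# gives the base64url alphabet of RFC 4648, safe for use in file names.)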
+#
+sub md5_file($) {
+ my ($s) = @_;
+ $s = md5_base64($s);
+ $s =~ s@[/]@_@gs;
+ $s =~ s@[+]@-@gs;
+ return $s;
+}
+
+
+# Given the URL of an image, download it into the given directory
+# and return the file name.
+#
+sub download_image($$$) {
+ my ($url, $uid, $dir) = @_;
+
+ my ($ext) = ($url =~ m@\.([a-z\d]+)$@si);
+ my $file = md5_file ($uid);
+ $file .= '.' . lc($ext) if $ext;
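+ # ($file is now a 22-character digest, plus the URL's extension if any.)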
+
+ # Don't bother doing If-Modified-Since to see if the URL has changed.
+ # If we have already downloaded it, assume it's good.
+ if (-f "$dir/$file") {
+ print STDERR "$progname: exists: $dir/$file for $uid / $url\n"
+ if ($verbose > 1);
+ return $file;
+ }
+
+ # Special-case kludge for Flickr:
+ # Their RSS feeds sometimes include only the small versions of the images.
+ # So if the URL ends in "s" (75x75), "t" (100x100) or "m" (240x240), then
+ # munge it to be "b" (1024x1024).
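+ # (E.g., a hypothetical ".../1234_m.jpg" becomes ".../1234_b.jpg".)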
+ #
+ $url =~ s@_[stm](\.[a-z]+)$@_b$1@si
+ if ($url =~ m@^https?://[^/?#&]*?flickr\.com/@si);
+
+ print STDERR "$progname: downloading: $dir/$file for $uid / $url\n"
+ if ($verbose > 1);
+ $ua->agent ("$progname/$version");
+ my $status = LWP::Simple::mirror ($url, "$dir/$file");
+ if (!LWP::Simple::is_success ($status)) {
+ print STDERR "$progname: error $status: $url\n"; # keep going
+ }
+
+ return $file;
+}
+
+
+sub mirror_feed($) {
+ my ($url) = @_;
+
+ if ($url !~ m/^https?:/si) { # not a URL: local directory.
+ return (undef, $url);
+ }
+
+ my $dir = "$ENV{HOME}/Library/Caches"; # MacOS location
+ if (-d $dir) {
+ $dir = "$dir/org.jwz.xscreensaver.feeds";
+ } elsif (-d "$ENV{HOME}/tmp") {
+ $dir = "$ENV{HOME}/tmp/.xscreensaver-feeds";
+ } else {
+ $dir = "$ENV{HOME}/.xscreensaver-feeds";
+ }
+
+ if (! -d $dir) {
+ mkdir ($dir) || error ("mkdir $dir: $!");
+ print STDERR "$progname: mkdir $dir/\n" if ($verbose);
+ }
+
+ # MD5 for directory name to use for cache of a feed URL.
+ $dir .= '/' . md5_file ($url);
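+ # (E.g. "$HOME/Library/Caches/org.jwz.xscreensaver.feeds/<digest>/";
+ # illustrative path, with <digest> being md5_file($url).)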
+
+ if (! -d $dir) {
+ mkdir ($dir) || error ("mkdir $dir: $!");
+ print STDERR "$progname: mkdir $dir/ for $url\n" if ($verbose);
+ }
+
+ # At this point, we have the directory corresponding to this URL.
+ # Now check to see if the files in it are up to date, and download
+ # them if not.
+
+ my $stamp = '.timestamp';
+ my $lock = "$dir/$stamp";
+
+ print STDERR "$progname: awaiting lock: $lock\n"
+ if ($verbose > 1);
+
+ my $mtime = ((stat($lock))[9]) || 0;
+
+ my $lock_fd;
+ open ($lock_fd, '+>>', $lock) || error ("unable to write $lock: $!");
+ flock ($lock_fd, LOCK_EX) || error ("unable to lock $lock: $!");
+ seek ($lock_fd, 0, 0) || error ("unable to rewind $lock: $!");
+
+ my $poll_p = ($mtime + $feed_max_age < time);
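+ # (True when the .timestamp file is older than $feed_max_age, 3 hours
+ # by default.)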
+
+ $poll_p = 1 unless ($cache_p); # poll again now with --no-cache cmd line arg.
+
+ # Even if the cache is young, let's make sure there are at least
+ # a few files in it, and re-check if not.
+ #
+ if (! $poll_p) {
+ my $count = 0;
+ opendir (my $dirh, $dir) || error ("$dir: $!");
+ foreach my $f (readdir ($dirh)) {
+ next if ($f =~ m/^\./s);
+ $count++;
+ last;
+ }
+ closedir $dirh;
+
+ if ($count <= 0) {
+ print STDERR "$progname: no files in cache of $url\n" if ($verbose);
+ $poll_p = 1;
+ }
+ }
+
+ if ($poll_p) {
+
+ print STDERR "$progname: loading $url\n" if ($verbose);
+
+ my %files;
+ opendir (my $dirh, $dir) || error ("$dir: $!");
+ foreach my $f (readdir ($dirh)) {
+ next if ($f eq '.' || $f eq '..');
+ $files{$f} = 0; # 0 means "file exists, should be deleted"
+ }
+ closedir $dirh;
+
+ $files{$stamp} = 1;
+
+ # Download each image currently in the feed.
+ #
+ my $count = 0;
+ my @urls = parse_feed ($url);
+ foreach my $p (@urls) {
+ my ($furl, $id) = @$p;
+ my $f = download_image ($furl, $id, $dir);
+ next unless $f;
+ $files{$f} = 1; # Got it, don't delete
+ $count++;
+ }
+
+ print STDERR "$progname: empty feed: $url\n" if ($count <= 0);
+
+ # Now delete any files that are no longer in the feed.
+ # But if there was nothing in the feed (network failure?)
+ # then don't blow away the old files.
+ #
+ my $kept = 0;
+ foreach my $f (keys(%files)) {
+ if ($count <= 0) {
+ $kept++;
+ } elsif ($files{$f}) {
+ $kept++;
+ } else {
+ if (unlink ("$dir/$f")) {
+ print STDERR "$progname: rm $dir/$f\n" if ($verbose > 1);
+ } else {
+ print STDERR "$progname: rm $dir/$f: $!\n"; # don't bail
+ }
+ }
+ }
+
+ # Both feed and cache are empty. No files at all.
+ error ("empty feed: $url") if ($kept <= 1);
+
+ $mtime = time(); # update the timestamp
+
+ } else {
+
+ # Not yet time to re-check the URL.
+ print STDERR "$progname: using cache: $url\n" if ($verbose);
+
+ }
+
+ # Unlock and update the write date on the .timestamp file.
+ #
+ truncate ($lock_fd, 0) || error ("unable to truncate $lock: $!");
+ seek ($lock_fd, 0, 0) || error ("unable to rewind $lock: $!");
+ utime ($mtime, $mtime, $lock_fd) || error ("unable to touch $lock: $!");
+ flock ($lock_fd, LOCK_UN) || error ("unable to unlock $lock: $!");
+ close ($lock_fd);
+ $lock_fd = undef;
+ print STDERR "$progname: unlocked $lock\n" if ($verbose > 1);
+
+ # Don't bother using the imageDirectory cache. We know that this directory
+ # is flat, and we can assume that an RSS feed doesn't contain 100,000 images
+ # like ~/Pictures/ might.
+ #
+ $cache_p = 0;
+
+ # Return the URL and the directory containing that URL's cached files.
+ #
+ return ($url, $dir);
}
sub find_random_file($) {
my ($dir) = @_;
- $dir =~ s@/+$@@g;
-
if ($use_spotlight_p == -1) {
$use_spotlight_p = 0;
if (-x '/usr/bin/mdfind') {
}
}
+ my $url;
+ ($url, $dir) = mirror_feed ($dir);
+
+ if ($url) {
+ $use_spotlight_p = 0;
+ print STDERR "$progname: $dir is cache for $url\n" if ($verbose > 1);
+ }
+
@all_files = read_cache ($dir);
if ($#all_files >= 0) {
write_cache ($dir);
- @all_files = sort(@all_files);
+# @all_files = sort(@all_files);
if ($#all_files < 0) {
print STDERR "$progname: no files in $dir\n";
my $n = int (rand ($#all_files + 1));
my $file = $all_files[$n];
if (large_enough_p ($file)) {
+ if (! $url) {
+ $file =~ s@^\Q$dir\E/@@so || die; # remove $dir from front
+ }
return $file;
}
}
}
-sub large_enough_p {
+sub large_enough_p($) {
my ($file) = @_;
my ($w, $h) = image_file_size ($file);
# Given the raw body of a GIF document, returns the dimensions of the image.
#
-sub gif_size {
+sub gif_size($) {
my ($body) = @_;
my $type = substr($body, 0, 6);
my $s;
# Given the raw body of a JPEG document, returns the dimensions of the image.
#
-sub jpeg_size {
+sub jpeg_size($) {
my ($body) = @_;
my $i = 0;
my $L = length($body);
# Given the raw body of a PNG document, returns the dimensions of the image.
#
-sub png_size {
+sub png_size($) {
my ($body) = @_;
return () unless ($body =~ m/^\211PNG\r/s);
my ($bits) = ($body =~ m/^.{12}(.{12})/s);
# Given the raw body of a GIF, JPEG, or PNG document, returns the dimensions
# of the image.
#
-sub image_size {
+sub image_size($) {
my ($body) = @_;
return () if (length($body) < 10);
my ($w, $h) = gif_size ($body);
# Returns the dimensions of the image file.
#
-sub image_file_size {
+sub image_file_size($) {
my ($file) = @_;
- local *IN;
- if (! open (IN, "<$file")) {
+ my $in;
+ if (! open ($in, '<', $file)) {
print STDERR "$progname: $file: $!\n" if ($verbose);
return undef;
}
- binmode (IN); # Larry can take Unicode and shove it up his ass sideways.
+ binmode ($in); # Larry can take Unicode and shove it up his ass sideways.
my $body = '';
- sysread (IN, $body, 1024 * 50); # The first 50k should be enough.
- close IN; # (It's not for certain huge jpegs...
+ sysread ($in, $body, 1024 * 50); # The first 50k should be enough.
+ close $in; # (It's not for certain huge jpegs...
return image_size ($body); # but we know they're huge!)
}
exit 1;
}
-sub usage {
+sub usage() {
print STDERR "usage: $progname [--verbose] directory\n" .
" Prints the name of a randomly-selected image file. The directory\n" .
" is searched recursively. Images smaller than " .
- "${min_image_width}x${min_image_height} are excluded.\n";
+ "${min_image_width}x${min_image_height} are excluded.\n" .
+ "\n" .
+ " The directory may also be the URL of an RSS/Atom feed. Enclosed\n" .
+ " images will be downloaded cached locally.\n" .
+ "\n";
exit 1;
}
-sub main {
+sub main() {
my $dir = undef;
while ($_ = $ARGV[0]) {
usage unless (defined($dir));
- $dir =~ s@^~/@$ENV{HOME}/@s; # allow literal "~/"
+ $dir =~ s@^feed:@http:@si;
- if (! -d $dir) {
- print STDERR "$progname: $dir: not a directory\n";
- usage;
+ if ($dir =~ m/^https?:/si) {
+ # ok
+ } else {
+ $dir =~ s@^~/@$ENV{HOME}/@s; # allow literal "~/"
+ $dir =~ s@/+$@@s; # omit trailing /
+
+ if (! -d $dir) {
+ print STDERR "$progname: $dir: not a directory or URL\n";
+ usage;
+ }
}
my $file = find_random_file ($dir);