+ my $file = $cache_file_name;
+ open ($cache_fd, '+>>', $file) || error ("unable to write $file: $!");
+ flock ($cache_fd, LOCK_EX) || error ("unable to lock $file: $!");
+ seek ($cache_fd, 0, 0) || error ("unable to rewind $file: $!");
+
+ my $mtime = (stat($cache_fd))[9];
+
+ if ($mtime + $cache_max_age < time) {
+ print STDERR "$progname: cache is too old\n" if ($verbose);
+ return ();
+ }
+
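+ # The first line of the cache file is the directory it indexes;
+ # the remaining lines are file names relative to that directory.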
+ my $odir = <$cache_fd>;
+ $odir =~ s/[\r\n]+$//s if defined ($odir);
+ if (!defined ($odir) || ($dir ne $odir)) {
+ print STDERR "$progname: cache is for $odir, not $dir\n"
+ if ($verbose && $odir);
+ return ();
+ }
+
+ my @files = ();
+ while (<$cache_fd>) {
+ s/[\r\n]+$//s;
+ push @files, "$odir/$_";
+ }
+
+ print STDERR "$progname: " . ($#files+1) . " files in cache\n"
+ if ($verbose);
+
+ $read_cache_p = 1;
+ return @files;
+}
+
+
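+ # Writes @all_files to the cache file opened by read_cache(), unless the
+ # cache was just read; then unlocks and closes it.
+ #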
+sub write_cache($) {
+ my ($dir) = @_;
+
+ return unless ($cache_p);
+
+ # If we read the cache, just close it without rewriting it.
+ # If we didn't read it, then write it now.
+
+ if (! $read_cache_p) {
+
+ truncate ($cache_fd, 0) ||
+ error ("unable to truncate $cache_file_name: $!");
+ seek ($cache_fd, 0, 0) ||
+ error ("unable to rewind $cache_file_name: $!");
+
+ if ($#all_files >= 0) {
+ print $cache_fd "$dir\n";
+ foreach (@all_files) {
+ my $f = $_; # stupid Perl. do this to avoid modifying @all_files!
+ $f =~ s@^\Q$dir\E/@@so || die; # remove $dir from front
+ print $cache_fd "$f\n";
+ }
+ }
+
+ print STDERR "$progname: cached " . ($#all_files+1) . " files\n"
+ if ($verbose);
+ }
+
+ flock ($cache_fd, LOCK_UN) ||
+ error ("unable to unlock $cache_file_name: $!");
+ close ($cache_fd);
+ $cache_fd = undef;
+}
+
+
+# Returns a list of the image enclosures in the RSS or Atom feed.
+# Elements of the list are references, [ "url", "guid" ].
+#
+sub parse_feed($) {
+ my ($url) = @_;
+
+ $ua->agent ("$progname/$version");
+ $ua->timeout (10); # bail sooner than the default of 3 minutes
+
+ my $body = (LWP::Simple::get($url) || '');
+
+ error ("not an RSS or Atom feed: $url")
+ unless ($body =~ m@^<\?xml\s@si);
+
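+ # Split the feed body into per-item chunks at each <entry> or <item> tag.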
+ $body =~ s@(<ENTRY|<ITEM)@\001$1@gsi;
+ my @items = split(/\001/, $body);
+ shift @items;
+
+ my @imgs = ();
+ my %ids;
+
+ foreach my $item (@items) {
+ my $iurl = undef;
+ my $id = undef;
+
+ # First look for <link rel="enclosure" href="...">
+ #
+ if (! $iurl) {
+ $item =~ s!(<LINK[^<>]*>)!{
+ my $link = $1;
+ my ($rel) = ($link =~ m/\bREL\s*=\s*[\"\']?([^<>\'\"]+)/si);
+ my ($type) = ($link =~ m/\bTYPE\s*=\s*[\"\']?([^<>\'\"]+)/si);
+ my ($href) = ($link =~ m/\bHREF\s*=\s*[\"\']([^<>\'\"]+)/si);
+
+ if ($rel && lc($rel) eq 'enclosure') {
+ if ($type) {
+ $href = undef unless ($type =~ m@^image/@si); # omit videos
+ }
+ $iurl = $href if ($href);
+ }
+ $link;
+ }!gsexi;
+ }
+
+ # Then look for <media:content url="...">
+ #
+ if (! $iurl) {
+ $item =~ s!(<MEDIA:CONTENT[^<>]*>)!{
+ my $link = $1;
+ my ($href) = ($link =~ m/\bURL\s*=\s*[\"\']([^<>\'\"]+)/si);
+ $iurl = $href if $href;
+ $link;
+ }!gsexi;
+ }
+
+ # Then look for <description>... with an <img href="..."> inside.
+ #
+ if (! $iurl) {
+ $item =~ s!(<description[^<>]*>.*?</description>)!{
+ my $desc = $1;
+ $desc =~ s/&lt;/</gs;
+ $desc =~ s/&gt;/>/gs;
+ $desc =~ s/&quot;/"/gs;
+ $desc =~ s/&apos;/'/gs;
+ $desc =~ s/&amp;/&/gs;
+ my ($href) = ($desc =~ m@<IMG[^<>]*\bSRC=[\"\']?([^\"\'<>]+)@si);
+ $iurl = $href if ($href);
+ $desc;
+ }!gsexi;
+ }
+
+ # Could also do <content:encoded>, but the above probably covers all
+ # of the real-world possibilities.
+
+
+ # Find a unique ID for this image, to defeat image farms.
+ # First look for <id>...</id>
+ ($id) = ($item =~ m!<ID\b[^<>]*>\s*([^<>]+?)\s*</ID>!si) unless $id;
+
+ # Then look for <guid isPermaLink=...> ... </guid>
+ ($id) = ($item =~ m!<GUID\b[^<>]*>\s*([^<>]+?)\s*</GUID>!si) unless $id;
+
+ # Then look for <link> ... </link>
+ ($id) = ($item =~ m!<LINK\b[^<>]*>\s*([^<>]+?)\s*</LINK>!si) unless $id;
+
+
+ if ($iurl) {
+ $id = $iurl unless $id;
+ my $o = $ids{$id};
+ if (! $o) {
+ $ids{$id} = $iurl;
+ my @P = ($iurl, $id);
+ push @imgs, \@P;
+ } elsif ($iurl ne $o) {
+ print STDERR "$progname: WARNING: dup ID \"$id\"" .
+ " for \"$o\" and \"$iurl\"\n";
+ }
+ }
+ }
+
+ return @imgs;
+}
+
+
+# Like md5_base64 but uses filename-safe characters.
+#
+sub md5_file($) {
+ my ($s) = @_;
+ $s = md5_base64($s);
+ $s =~ s@[/]@_@gs;
+ $s =~ s@[+]@-@gs;
+ return $s;
+}
+
+
+# Given the URL of an image, download it into the given directory
+# and return the file name.
+#
+sub download_image($$$) {
+ my ($url, $uid, $dir) = @_;
+
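+ # Name the local file after a hash of the item's unique ID, keeping the
+ # URL's file extension.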
+ my ($ext) = ($url =~ m@\.([a-z\d]+)$@si);
+ my $file = md5_file ($uid);
+ $file .= '.' . lc($ext) if $ext;
+
+ # Don't bother doing If-Modified-Since to see if the URL has changed.
+ # If we have already downloaded it, assume it's good.
+ if (-f "$dir/$file") {
+ print STDERR "$progname: exists: $dir/$file for $uid / $url\n"
+ if ($verbose > 1);
+ return $file;
+ }
+
+ # Special-case kludge for Flickr:
+ # Their RSS feeds sometimes include only the small versions of the images.
+ # So if the URL ends in "s" (75x75), "t" (100x100) or "m" (240x240), then
+ # munge it to be "b" (1024x1024).
+ #
+ $url =~ s@_[stm](\.[a-z]+)$@_b$1@si
+ if ($url =~ m@^https?://[^/?#&]*?flickr\.com/@si);
+
+ print STDERR "$progname: downloading: $dir/$file for $uid / $url\n"
+ if ($verbose > 1);
+ $ua->agent ("$progname/$version");
+ my $status = LWP::Simple::mirror ($url, "$dir/$file");
+ if (!LWP::Simple::is_success ($status)) {
+ print STDERR "$progname: error $status: $url\n"; # keep going
+ }
+
+ return $file;
+}
+
+
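+ # Mirrors the images of an RSS/Atom feed URL into a local cache directory
+ # and returns ($url, $dir). If the argument is a local directory rather
+ # than a URL, returns (undef, $dir) unchanged.
+ #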
+sub mirror_feed($) {
+ my ($url) = @_;
+
+ if ($url !~ m/^https?:/si) { # not a URL: local directory.
+ return (undef, $url);
+ }
+
+ my $dir = "$ENV{HOME}/Library/Caches"; # MacOS location
+ if (-d $dir) {
+ $dir = "$dir/org.jwz.xscreensaver.feeds";
+ } elsif (-d "$ENV{HOME}/tmp") {
+ $dir = "$ENV{HOME}/tmp/.xscreensaver-feeds";
+ } else {
+ $dir = "$ENV{HOME}/.xscreensaver-feeds";
+ }
+
+ if (! -d $dir) {
+ mkdir ($dir) || error ("mkdir $dir: $!");
+ print STDERR "$progname: mkdir $dir/\n" if ($verbose);
+ }
+
+ # Use an MD5 of the feed URL as the name of its cache directory.
+ $dir .= '/' . md5_file ($url);
+
+ if (! -d $dir) {
+ mkdir ($dir) || error ("mkdir $dir: $!");
+ print STDERR "$progname: mkdir $dir/ for $url\n" if ($verbose);
+ }
+
+ # At this point, we have the directory corresponding to this URL.
+ # Now check to see if the files in it are up to date, and download
+ # them if not.
+
+ my $stamp = '.timestamp';
+ my $lock = "$dir/$stamp";
+
+ print STDERR "$progname: awaiting lock: $lock\n"
+ if ($verbose > 1);
+
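+ # Note when the feed was last polled (0 if the lock file doesn't exist yet).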
+ my $mtime = ((stat($lock))[9]) || 0;
+
+ my $lock_fd;
+ open ($lock_fd, '+>>', $lock) || error ("unable to write $lock: $!");
+ flock ($lock_fd, LOCK_EX) || error ("unable to lock $lock: $!");
+ seek ($lock_fd, 0, 0) || error ("unable to rewind $lock: $!");
+
+ my $poll_p = ($mtime + $feed_max_age < time);
+
+ $poll_p = 1 unless ($cache_p); # poll again now with --no-cache cmd line arg.
+
+ # Even if the cache is young, make sure it contains at least one file,
+ # and re-poll the feed if not.
+ #
+ if (! $poll_p) {
+ my $count = 0;
+ opendir (my $dirh, $dir) || error ("$dir: $!");
+ foreach my $f (readdir ($dirh)) {
+ next if ($f =~ m/^\./s);
+ $count++;
+ last;
+ }
+ closedir $dirh;
+
+ if ($count <= 0) {
+ print STDERR "$progname: no files in cache of $url\n" if ($verbose);
+ $poll_p = 1;
+ }
+ }
+
+ if ($poll_p) {
+
+ print STDERR "$progname: loading $url\n" if ($verbose);
+
+ my %files;
+ opendir (my $dirh, $dir) || error ("$dir: $!");
+ foreach my $f (readdir ($dirh)) {
+ next if ($f eq '.' || $f eq '..');
+ $files{$f} = 0; # 0 means "file exists, should be deleted"
+ }
+ closedir $dirh;
+
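+ # Never delete the timestamp file itself.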
+ $files{$stamp} = 1;
+
+ # Download each image currently in the feed.
+ #
+ my $count = 0;
+ my @urls = parse_feed ($url);
+ foreach my $p (@urls) {
+ my ($furl, $id) = @$p;
+ my $f = download_image ($furl, $id, $dir);
+ next unless $f;
+ $files{$f} = 1; # Got it, don't delete
+ $count++;
+ }
+
+ print STDERR "$progname: empty feed: $url\n" if ($count <= 0);
+
+ # Now delete any files that are no longer in the feed.
+ # But if there was nothing in the feed (network failure?)
+ # then don't blow away the old files.
+ #
+ my $kept = 0;
+ foreach my $f (keys(%files)) {
+ if ($count <= 0) {
+ $kept++;
+ } elsif ($files{$f}) {
+ $kept++;
+ } else {
+ if (unlink ("$dir/$f")) {
+ print STDERR "$progname: rm $dir/$f\n" if ($verbose > 1);
+ } else {
+ print STDERR "$progname: rm $dir/$f: $!\n"; # don't bail
+ }
+ }
+ }
+
+ # Both feed and cache are empty. No files at all.
+ error ("empty feed: $url") if ($kept <= 1);
+
+ $mtime = time(); # update the timestamp
+
+ } else {
+
+ # Not yet time to re-check the URL.
+ print STDERR "$progname: using cache: $url\n" if ($verbose);
+
+ }
+
+ # Unlock and update the write date on the .timestamp file.
+ #
+ truncate ($lock_fd, 0) || error ("unable to truncate $lock: $!");
+ seek ($lock_fd, 0, 0) || error ("unable to rewind $lock: $!");
+ utime ($mtime, $mtime, $lock_fd) || error ("unable to touch $lock: $!");
+ flock ($lock_fd, LOCK_UN) || error ("unable to unlock $lock: $!");
+ close ($lock_fd);
+ $lock_fd = undef;
+ print STDERR "$progname: unlocked $lock\n" if ($verbose > 1);
+
+ # Don't bother using the imageDirectory cache. We know that this directory
+ # is flat, and we can assume that an RSS feed doesn't contain 100,000 images
+ # like ~/Pictures/ might.
+ #
+ $cache_p = 0;
+
+ # Return the URL and the directory holding its locally-cached files.
+ #
+ return ($url, $dir);
+}
+
+
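+ # Returns a random image file from the given directory (or from the local
+ # mirror of the given feed URL).
+ #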
+sub find_random_file($) {
+ my ($dir) = @_;
+
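+ # -1 means "not yet checked": use Spotlight's mdfind for the file search
+ # if it is available.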
+ if ($use_spotlight_p == -1) {
+ $use_spotlight_p = 0;
+ if (-x '/usr/bin/mdfind') {
+ $use_spotlight_p = 1;
+ }
+ }
+
+ my $url;
+ ($url, $dir) = mirror_feed ($dir);
+
+ if ($url) {
+ $use_spotlight_p = 0;
+ print STDERR "$progname: $dir is cache for $url\n" if ($verbose > 1);
+ }
+
+ @all_files = read_cache ($dir);
+
+ if ($#all_files >= 0) {
+ # got it from the cache...
+
+ } elsif ($use_spotlight_p) {
+ print STDERR "$progname: spotlighting $dir...\n" if ($verbose);
+ spotlight_all_files ($dir);
+ print STDERR "$progname: found " . ($#all_files+1) .
+ " file" . ($#all_files == 0 ? "" : "s") .
+ " via Spotlight\n"
+ if ($verbose);
+ } else {
+ print STDERR "$progname: recursively reading $dir...\n" if ($verbose);
+ find_all_files ($dir);
+ print STDERR "$progname: " .
+ "f=" . ($#all_files+1) . "; " .
+ "d=$dir_count; " .
+ "s=$stat_count; " .
+ "skip=${skip_count_unstat}+$skip_count_stat=" .
+ ($skip_count_unstat + $skip_count_stat) .
+ ".\n"
+ if ($verbose);
+ }
+
+ write_cache ($dir);
+
+# @all_files = sort(@all_files);